Per-row fields and value statistics:

    query            string    lengths 9–9.05k
    document         string    lengths 10–222k
    metadata         dict
    negatives        sequence  length 30
    negative_scores  sequence  length 30
    document_score   string    lengths 4–10
    document_rank    string    2 classes
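Each row pairs a natural-language query with a positive code document, plus 30 mined negatives, their scores, and the positive document's score and rank. A minimal sketch of how one row might be turned into a training triplet; the row literal below is illustrative only, not an actual record:

row = {
    "query": "Create a 3D rotation matrix for rotation about the x-axis.",
    "document": "def rotation3Dx(theta): ...",
    "metadata": {"objective": {"self": [], "paired": [], "triplet": [["query", "document", "negatives"]]}},
    "negatives": ["def rotation3Dz(theta): ..."] * 30,   # 30 hard-negative code snippets
    "negative_scores": [0.78] * 30,                      # one score per negative
    "document_score": "0.78",                            # score of the positive document
    "document_rank": "0",
}

# The metadata objective declares (query, document, negatives) triplets.
anchor, positive, negatives = row["query"], row["document"], row["negatives"]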
Derivative of reproducing kernel on even subspaces of maximum degree N.
def even_kernel_der(mu, N):
    # Check that -1 <= mu <= 1
    mu = np.clip(mu, -1, 1)

    # Derivatives of Legendre polynomials
    DlegPolys = legp_der(mu, N)

    coefs = 2*np.arange(0, N+1) + 1

    ker = coefs[0::2]*DlegPolys[0::2]

    return ker.sum() / (4.0*np.pi)
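A minimal usage sketch for the function above, assuming legp_der returns the derivatives P'_0(mu)..P'_N(mu) of the Legendre polynomials; the helper below is a hypothetical stand-in built on NumPy's Legendre basis, not the original implementation:

import numpy as np
from numpy.polynomial.legendre import Legendre

def legp_der(mu, N):
    # Hypothetical stand-in: derivatives of Legendre polynomials P_0..P_N evaluated at mu.
    return np.array([Legendre.basis(n).deriv()(mu) for n in range(N + 1)])

print(even_kernel_der(0.3, 8))   # scalar value of the kernel derivative at mu = 0.3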
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n\n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs[0::2]*legPolys[0::2] \n\n return ker.sum() / (4.0*np.pi)", "def compute_gradient_kernel_respect_to_noise(n):\n\n return np.identity(n)", "def DDG(self, n, e, r, f):\n pre = (-e[:, None] + np.divide.outer((n - 1), r))**2\n pre -= np.divide.outer((n - 1), r**2)\n return pre*f", "def inv_funk_radon_even_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n\n coefs_num = 2*np.arange(0, N+1) + 1\n coefs_den = np.arange(2,N+1,2) * (np.arange(2,N+1,2) + 1)\n\n ker = coefs_num[2::2]*legPolys[2::2] / (p_at_zero[2::2] * coefs_den)\n\n return ker.sum() / (8.0*np.pi*np.pi)", "def delta(N):\n assert assert_odd(N) # Make sure kernel is odd\n X = np.zeros((N,N)) # Square matrix with all 0s\n middle = int(N/2) # Get the middle cell\n X[middle, middle] = 1\n return X", "def nd_kernel(n):\n n = int(n)\n total_size = 3**n\n mid_point = int((3**n - 1)/2)\n kern = np.zeros(total_size, dtype=bool)\n for i in range(n):\n kern[mid_point-3**i] = True\n kern[mid_point+3**i] = True\n new_shape = 3*np.ones(n, dtype=int) \n unnormed_kern = kern.reshape(new_shape)\n return unnormed_kern/unnormed_kern.sum()", "def DG(self, n, e, r, f):\n\n pre = -e[:, None] + np.divide.outer((n - 1), r)\n return pre*f", "def nth_derivative(f, x, n):\n h = 10e-2\n out_h = 1/(h**n)\n out = 0\n for k in range(0, n+1):\n out += (-1)**(k+n)*choose(n,k)*f(x +k*h)\n return out_h*out", "def kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n \n coefs = 2*np.arange(0, N+1) + 1\n \n ker = coefs*legPolys \n\n return ker.sum() / (4.0*np.pi)", "def kernel(n):\r\n return [(k, n - abs(k)) for k in range(-n, n + 1)]", "def ddh_per_dim(f, dim):\n diff_ops = [\n lambda f: self._kernel_op.apply_kernel_op_x(f, 'kddx'),\n lambda f: self._kernel_op.apply_kernel_op_y(f, 'kddy'),\n lambda f: self._kernel_op.apply_kernel_op_z(f, 'kddz', 'kddzsh'),\n ]\n return tf.nest.map_structure(lambda diff: diff / grid_spacing[dim]**2,\n diff_ops[dim](f))", "def sub_kernel(kernel, dim1, dim2):\n\n sub_kernel = kernel[dim1[0]:dim1[1],dim2[0]:dim2[1]]\n return sub_kernel", "def dilate_kernel(self, kernel, dilation):\n if dilation == 0:\n return kernel \n # inside padding based on the scaling law\n dilation = torch.tensor(dilation).float()\n delta = dilation%1\n\n d_in = torch.ceil(dilation**2).int()\n new_in = kernel.shape[2] + (kernel.shape[2]-1)*d_in\n\n d_h = torch.ceil(dilation).int()\n new_h = kernel.shape[3] + (kernel.shape[3]-1)*d_h\n\n d_w = torch.ceil(dilation).int()\n new_w = kernel.shape[4] + (kernel.shape[4]-1)*d_h\n\n new_kernel = torch.zeros(kernel.shape[0], kernel.shape[1], new_in, new_h, new_w)\n new_kernel[:,:,::(d_in+1),::(d_h+1), ::(d_w+1)] = kernel\n dilate_factor = 1\n \n new_kernel = F.pad(new_kernel, ((kernel.shape[4]-1)//2, (kernel.shape[4]-1)//2)*3)\n\n dilate_factor = (new_kernel.shape[-1] - 1 - (kernel.shape[4]-1)*(delta))/(new_kernel.shape[-1] - 1) \n\n grid = torch.meshgrid(torch.linspace(-1, 1, new_in)*(dilate_factor**2), \n torch.linspace(-1, 1, new_h)*dilate_factor, \n torch.linspace(-1, 1, new_w)*dilate_factor)\n\n grid = torch.cat([grid[2].unsqueeze(0).unsqueeze(-1), \n grid[1].unsqueeze(0).unsqueeze(-1), \n grid[0].unsqueeze(0).unsqueeze(-1)], dim = 
-1).repeat(kernel.shape[0],1,1,1,1)\n\n new_kernel = F.grid_sample(new_kernel, grid) \n \n return new_kernel[:,:,-kernel.shape[2]:]", "def d(i):\n if i==0:\n return 0\n elif (i%2)==0:\n return g(i-1) % N\n else:\n return g(i) % N", "def eg3(N_train=1000, N_test=500, depend_ratio_train=0.8, depend_ratio_test=0.2, feature_num=10, stable_ratio=0.4):\n\n def eg3_kernel(n, p, stable_ratio=0.4, depend_ratio=0.8):\n p_stable = int(p * stable_ratio)\n p_noise = p - p_stable\n stable_feature = np.random.randn(n, p_stable)\n noise_feature_dependent = np.zeros([n, p_noise])\n noise_feature_independent = np.random.randn(n, p_noise)\n for i in range(p_noise):\n noise_feature_dependent[:, i] = stable_feature[:, i % p_stable] + stable_feature[:,\n (i + 1) % p_stable] + 2 * np.random.randn(\n n) # still need noise\n noise_depend_label = np.random.uniform(0, 1, n).reshape(-1, 1)\n noise_depend_label = np.concatenate([noise_depend_label] * p_noise, axis=1)\n noise_feature = np.where(noise_depend_label < depend_ratio, noise_feature_dependent, noise_feature_independent)\n\n b = np.zeros([p_stable, 1])\n linear_len = int(p_stable / 2)\n\n for i in range(linear_len): # linear part\n b[i, 0] = (-1) ** i * (i % 3 + 1) * p / 3\n for i in range(linear_len, b.shape[0]): # nonlinear part\n b[i, 0] = p / 2\n\n linear_part = np.matmul(stable_feature[:, :linear_len], b[:linear_len, 0])\n nolinear_part = np.zeros([n, 1])\n for i in range(linear_len, b.shape[0]):\n temp = stable_feature[:, i % p_stable] * stable_feature[:, (i + 1) % p_stable] * b[i, 0]\n temp = temp.reshape(-1, 1)\n nolinear_part += temp\n\n Y = linear_part.reshape(-1, 1) + nolinear_part + np.random.randn(n, 1)\n\n data = {}\n data['stable'] = stable_feature\n data['noise'] = noise_feature\n data['Y'] = Y\n data['params'] = b\n data['kernel'] = 'eg3'\n return data\n\n data_train = eg3_kernel(n=N_train, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_train)\n data_test = eg3_kernel(n=N_test, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_test)\n return data_train, data_test", "def perfect_sweep(N):\n\n m = np.arange(0, np.ceil(N / 2 + 1))\n P_half = np.exp(-1j * 2 * np.pi / N * m ** 2)\n return np.real(np.fft.irfft(P_half, n=N))", "def evolve_system(self, x, n, k, gamma):\n temp = tf.pow(k, n)/(tf.pow(x, n)+tf.pow(k,n))\n # dxdt = tf.manip.roll(temp, shift = -1, axis = 1) - gamma*x # v1.6+\n dxdt = tf.concat([ tf.reshape(temp[:, -1], [-1, 1]),\n temp[:,:-1]], axis=1) - gamma*x # v1.5\n dxdt = tf.convert_to_tensor(dxdt, dtype = tf.float32, name = \"dxdt\")\n return dxdt", "def _eunn_loop(state, capacity, diag_vec_list, off_vec_list, diag, fft):\n i = 0\n def layer_tunable(x, i):\n\n diag_vec = diag_vec_list.read(i)\n off_vec = off_vec_list.read(i)\n\n diag = tf.multiply(x, diag_vec)\n off = tf.multiply(x, off_vec)\n\n def even_input(off, size):\n\n def even_s(off, size):\n off = tf.reshape(off, [-1, size//2, 2])\n off = tf.reshape(tf.reverse(off, [2]), [-1, size])\n return off\n\n def odd_s(off, size):\n off, helper = tf.split(off, [size-1, 1], 1)\n size -= 1\n off = even_s(off, size)\n off = tf.concat([off, helper], 1)\n return off\n\n off = tf.cond(tf.equal(tf.mod(size, 2), 0), lambda: even_s(off, size), lambda: odd_s(off, size))\n return off\n\n def odd_input(off, size):\n helper, off = tf.split(off, [1, size-1], 1)\n size -= 1\n off = even_input(off, size)\n off = tf.concat([helper, off], 1)\n return off\n\n size = int(off.get_shape()[1])\n off = tf.cond(tf.equal(tf.mod(i, 2), 0), lambda: even_input(off, size), 
lambda: odd_input(off, size))\n\n layer_output = diag + off\n i += 1\n\n return layer_output, i\n\n def layer_fft(state, i):\n\n diag_vec = diag_vec_list.read(i)\n off_vec = off_vec_list.read(i)\n diag = tf.multiply(state, diag_vec)\n off = tf.multiply(state, off_vec)\n\n hidden_size = int(off.get_shape()[1])\n # size = 2**i\n dist = capacity - i\n normal_size = (hidden_size // (2**dist)) * (2**(dist-1))\n normal_size *= 2\n extra_size = tf.maximum(0, (hidden_size % (2**dist)) - (2**(dist-1)))\n hidden_size -= normal_size\n\n def modify(off_normal, dist, normal_size):\n off_normal = tf.reshape(tf.reverse(tf.reshape(off_normal, [-1, normal_size//(2**dist), 2, (2**(dist-1))]), [2]), [-1, normal_size])\n return off_normal\n\n def do_nothing(off_normal):\n return off_normal\n\n off_normal, off_extra = tf.split(off, [normal_size, hidden_size], 1)\n off_normal = tf.cond(tf.equal(normal_size, 0), lambda: do_nothing(off_normal), lambda: modify(off_normal, dist, normal_size))\n helper1, helper2 = tf.split(off_extra, [hidden_size-extra_size, extra_size], 1)\n off_extra = tf.concat([helper2, helper1], 1)\n off = tf.concat([off_normal, off_extra], 1)\n\n layer_output = diag + off\n i += 1\n\n return layer_output, i\n\n if fft:\n layer_function = layer_fft\n else:\n layer_function = layer_tunable\n output, _ = tf.while_loop(lambda state, i: tf.less(i, capacity), layer_function, [state, i])\n\n if not diag is None:\n output = tf.multiply(output, diag)\n\n\n return output", "def dpp_sw(kernel_matrix, window_size=3, max_length=14, epsilon=1E-10):\r\n item_size = kernel_matrix.shape[0]\r\n v = np.zeros((max_length, max_length))\r\n cis = np.zeros((max_length, item_size))\r\n di2s = np.copy(np.diag(kernel_matrix))\r\n selected_items = list()\r\n selected_item = np.argmax(di2s)\r\n selected_items.append(selected_item)\r\n window_left_index = 0\r\n while len(selected_items) < max_length:\r\n k = len(selected_items) - 1\r\n ci_optimal = cis[window_left_index:k, selected_item]\r\n di_optimal = math.sqrt(di2s[selected_item])\r\n v[k, window_left_index:k] = ci_optimal\r\n v[k, k] = di_optimal\r\n elements = kernel_matrix[selected_item, :]\r\n eis = (elements - np.dot(ci_optimal, cis[window_left_index:k, :])) / di_optimal\r\n cis[k, :] = eis\r\n di2s -= np.square(eis)\r\n if len(selected_items) >= window_size:\r\n window_left_index += 1\r\n for ind in range(window_left_index, k + 1):\r\n t = math.sqrt(v[ind, ind] ** 2 + v[ind, window_left_index - 1] ** 2)\r\n c = t / v[ind, ind]\r\n s = v[ind, window_left_index - 1] / v[ind, ind]\r\n v[ind, ind] = t\r\n v[ind + 1:k + 1, ind] += s * v[ind + 1:k + 1, window_left_index - 1]\r\n v[ind + 1:k + 1, ind] /= c\r\n v[ind + 1:k + 1, window_left_index - 1] *= c\r\n v[ind + 1:k + 1, window_left_index - 1] -= s * v[ind + 1:k + 1, ind]\r\n cis[ind, :] += s * cis[window_left_index - 1, :]\r\n cis[ind, :] /= c\r\n cis[window_left_index - 1, :] *= c\r\n cis[window_left_index - 1, :] -= s * cis[ind, :]\r\n di2s += np.square(cis[window_left_index - 1, :])\r\n di2s[selected_item] = -np.inf\r\n selected_item = np.argmax(di2s)\r\n if di2s[selected_item] < epsilon:\r\n break\r\n selected_items.append(selected_item)\r\n return selected_items", "def rk4_sde(self, x, rv_n):\n a21 = 2.71644396264860\n a31 = - 6.95653259006152\n a32 = 0.78313689457981\n a41 = 0.0\n a42 = 0.48257353309214\n a43 = 0.26171080165848\n a51 = 0.47012396888046\n a52 = 0.36597075368373\n a53 = 0.08906615686702\n a54 = 0.07483912056879\n\n q1 = 2.12709852335625\n q2 = 2.73245878238737\n q3 = 11.22760917474960\n q4 = 
13.36199560336697\n\n n = self.mp.params[0]; k = self.mp.params[1];\n gamma = self.mp.params[2]; dt = self.mp.params[3];\n\n if x.get_shape()[1] > 1:\n evolve_fun = self.evolve_system\n else:\n evolve_fun = self.evolve\n\n x1 = x\n k1 = dt * evolve_fun(x1, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x2 = x1 + a21 * k1\n k2 = dt * evolve_fun(x2, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x3 = x1 + a31 * k1 + a32 * k2\n k3 = dt * evolve_fun(x3, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x4 = x1 + a41 * k1 + a42 * k2\n k4 = dt * evolve_fun(x4, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x_new = x1 + a51 * k1 + a52 * k2 + a53 * k3 + a54 * k4\n\n return tf.cast(x_new, tf.float32)", "def inv_funk_radon_kernel(mu, N):\n\n # Check that -1 <= mu <= 1\n mu = np.clip(mu, -1, 1)\n\n # Need Legendre polynomials\n legPolys = legp(mu, N)\n p_at_zero = legp(0, N)\n coefs = 2*np.arange(0, N+1, 2) + 1\n ker = coefs*legPolys[::2]/p_at_zero[::2]\n return ker.sum() / (8*np.pi)", "def get_derivative(self, model, params, n):\n params1 = np.array(params)\n params2 = np.array(params)\n\n params1[n] += self.eps\n params2[n] -= self.eps\n\n res1 = model.run(params1)\n res2 = model.run(params2)\n\n d = (res1 - res2) / (2 * self.eps)\n\n return d.ravel()", "def grad_n(f: FlowFieldVal, dim: int, h: float) -> FlowFieldVal:\n if dim == 0:\n df = kernel_op.apply_kernel_op_x(f, 'kDx')\n elif dim == 1:\n df = kernel_op.apply_kernel_op_y(f, 'kDy')\n elif dim == 2:\n df = kernel_op.apply_kernel_op_z(f, 'kDz', 'kDzsh')\n else:\n raise ValueError('Unsupport dimension: {}'.format(dim))\n\n return [df_i / (2.0 * h) for df_i in df]", "def evolve(self, x, n, k, gamma):\n dxdt = tf.pow(x, n)/(tf.pow(x, n)+tf.pow(k,n)) - gamma*x\n return dxdt", "def softmax_derivative(x):\n der = derivative(softmax,x,dx=1e-9)\n return der", "def backward(g, N, K):\n\tb = np.zeros((N,K))\n\tfor t in reversed(xrange(0,N-1)):\n\t\tby = b[t+1,:]\n\t\tfor yp in xrange(K):\n\t\t\tb[t,yp] = misc.logsumexp(by + g[t,yp,:])\n\treturn b", "def ogfft2(x, N):\n x_p = brc(x)\n PI = np.pi\n for ii in np.arange(1,int(np.log2(N)) + 1):\n M = int(2**ii)\n w_M = np.exp(1j*((2*PI)/M))\n for kk in np.arange(0,N,M):\n w = 1\n m = int(M/2)\n for jj in np.arange(m):\n t = w*x_p[kk + jj + m]\n u = x_p[kk + jj]\n x_p[kk + jj] = u + t\n x_p[kk + jj + m] = u - t\n w = w*w_M\n return x_p", "def F_std(d, N):\n # memoize specht() and weyl() results (but only for current call)\n specht_mem, weyl_mem = memoize(specht), memoize(weyl)\n\n return sum(\n d ** (-N - 2)\n * sum(sqrt(specht_mem(mu) * weyl_mem(d, mu)) for mu in box_added(alpha, d)) ** 2\n for alpha in Partitions(n=N - 1, max_length=d)\n )", "def eg4(N_train=1000, N_test=500, depend_ratio_train=0.8, depend_ratio_test=0.2, feature_num=10, stable_ratio=0.4):\n\n def eg4_kernel(n, p, stable_ratio=0.4, depend_ratio=0.8):\n p_stable = int(p * stable_ratio)\n p_noise = p - p_stable\n noise_feature = np.random.randn(n, p_noise)\n stable_feature_dependent = np.zeros([n, p_stable])\n stable_feature_independent = np.random.randn(n, p_stable)\n for i in range(p_stable):\n stable_feature_dependent[:, i] = noise_feature[:, i % p_noise] + noise_feature[:,\n (i + 1) % p_noise] + 2 * np.random.randn(\n n) # still need noise\n stable_depend_label = np.random.uniform(0, 1, n).reshape(-1, 1)\n stable_depend_label = np.concatenate([stable_depend_label] * p_stable, axis=1)\n stable_feature = np.where(stable_depend_label < depend_ratio, stable_feature_dependent,\n stable_feature_independent)\n\n b = np.zeros([p_stable, 1])\n linear_len = int(p_stable / 
2)\n\n for i in range(linear_len): # linear part\n b[i, 0] = (-1) ** i * (i % 3 + 1) * p / 3\n for i in range(linear_len, b.shape[0]): # nonlinear part\n b[i, 0] = p / 2\n\n Y = np.matmul(stable_feature, b) + np.random.randn(n, 1)\n\n data = {}\n data['stable'] = stable_feature\n data['noise'] = noise_feature\n data['Y'] = Y\n data['params'] = b\n data['kernel'] = 'eg4'\n return data\n\n data_train = eg4_kernel(n=N_train, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_train)\n data_test = eg4_kernel(n=N_test, p=feature_num, stable_ratio=stable_ratio, depend_ratio=depend_ratio_test)\n return data_train, data_test", "def derivative_ex(dirichl_space, neumann_space, ep_in, ep_ex, kappa, operator_assembler):\n phi_id = sparse.identity(dirichl_space, dirichl_space, dirichl_space)\n dph_id = sparse.identity(neumann_space, neumann_space, neumann_space)\n ep = ep_ex/ep_in\n\n dF = laplace.double_layer(dirichl_space, dirichl_space, dirichl_space, assembler=operator_assembler)\n dP = modified_helmholtz.double_layer(dirichl_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler)\n B = 1/ep * dF - dP\n\n F = laplace.single_layer(neumann_space, dirichl_space, dirichl_space, assembler=operator_assembler)\n P = modified_helmholtz.single_layer(neumann_space, dirichl_space, dirichl_space, kappa, assembler=operator_assembler)\n A = F - P\n\n ddF = laplace.hypersingular(dirichl_space, neumann_space, neumann_space, assembler=operator_assembler)\n ddP = modified_helmholtz.hypersingular(dirichl_space, neumann_space, neumann_space, kappa, assembler=operator_assembler)\n D = 1/ep * (ddP - ddF)\n\n dF0 = laplace.adjoint_double_layer(neumann_space, neumann_space, neumann_space, assembler=operator_assembler)\n dP0 = modified_helmholtz.adjoint_double_layer(neumann_space, neumann_space, neumann_space, kappa, assembler=operator_assembler)\n C = dF0 - 1.0/ep*dP0\n\n A_sys = bempp.api.BlockedOperator(2, 2)\n A_sys[0, 0] = (0.5*(1.0 + (1.0/ep))*phi_id) + B\n A_sys[0, 1] = -A\n A_sys[1, 0] = D\n A_sys[1, 1] = (0.5*(1.0 + (1.0/ep))*dph_id) - C\n\n return A_sys" ]
[ "0.63502026", "0.60510343", "0.5926386", "0.59101856", "0.5898114", "0.56899774", "0.56315917", "0.5616251", "0.558586", "0.55735755", "0.55652493", "0.55386996", "0.5501476", "0.54612076", "0.54292554", "0.54179746", "0.54039854", "0.5398605", "0.5376709", "0.53656363", "0.5358466", "0.53420204", "0.53278977", "0.53239286", "0.5309184", "0.5269692", "0.52427894", "0.5233143", "0.52239406", "0.5203497" ]
0.66818523
0
Returns the truncated iterated logarithm y = log(-log(x)), where if x < delta, x = delta, and if 1 - delta < x, x = 1 - delta.
def ilog(x, delta):
    if (delta < x and x < 1.0 - delta):
        return np.log( -np.log(x) )
    elif (x < delta):
        return np.log( -np.log(delta) )
    else:
        return np.log( -np.log(1.0 - delta) )
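A quick usage sketch showing the truncation behaviour, assuming NumPy is imported as np:

import numpy as np

for x in (1e-6, 0.5, 1 - 1e-6):
    # Values below delta or above 1 - delta are clamped before log(-log(x)) is applied.
    print(x, ilog(x, delta=1e-3))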
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def logit(x: torch.Tensor, eps=1e-5) -> torch.Tensor:\n x = torch.clamp(x, eps, 1.0 - eps)\n return torch.log(x / (1.0 - x))", "def safelog(x):\n #return np.log(x)\n return np.log(np.clip(x,floor,np.inf))", "def diff_log(x):\n \n return np.diff(np.log(x)),np.log(x)[0]", "def diff_log(x):\n\n return np.diff(np.log(x)),np.log(x)[0]", "def log_transform(x, epsilon = 1e-4):\n if x.min() < 0: epsilon += np.abs(x.min())\n return (x.fillna(0).astype(float) + epsilon).apply(np.log)", "def log(amount, start, stop, truncated, sequence):\n ratio = 10 ** (len(str(start)) + 1)\n for x in range(start, amount):\n # y = abs(round(math.log(x, 1)))\n y = abs(round(math.log1p(x) * ratio * 5))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence", "def logarithm(x, eps=10e-5):\n if abs(x) >= 1:\n return float('Nan')\n\n pre_x = x\n tmp = x ** 2\n sign = -1\n i = 2\n res_x = pre_x + sign * tmp / i\n\n while abs(res_x - pre_x) > eps:\n sign = -sign\n i += 1\n tmp *= x\n pre_x = res_x\n res_x += sign * tmp / i\n\n return res_x", "def log(self, x, base=2):\n if x == 0:\n return 0\n return math.log(x, base)", "def logaddexp(x: torch.Tensor, y: torch.Tensor) -> torch.Tensor:\n return torch.max(x, y) + torch.log(1 + torch.exp(-torch.abs(y - x)))", "def lognormalize(x, temp = 1):\n if type(x) is list: x = np.array(x)\n\n x = x - np.max(x)\n # anneal\n xp = np.power(np.exp(x), temp)\n return xp / xp.sum()", "def ln(x):\n return log(x, const.e)", "def log(x, base=math.e):\n return 0.0", "def log10_inplace(a):", "def safe_log(x):\n safe_x = jnp.where(x > 0.0, x, jnp.ones_like(x))\n return jnp.where(x > 0.0, jnp.log(safe_x), jnp.zeros_like(x))", "def logaddexp(X, Y):\n XY_max = T.maximum(X, Y)\n XY_min = T.minimum(X, Y)\n return XY_max + T.log1p(T.exp(XY_min - XY_max))", "def my_log(num):\n\n if num == 0.0:\n return -9999999999\n return math.log(num)", "def smart_log(self, value: float) -> float:\n if value > 0:\n return math.log(value, self.log_scale)\n elif value == 0:\n return 0\n elif value < 0:\n return -(math.log(abs(value), self.log_scale))", "def log(self, base):\n\n\t\tvalues = map(lambda x: x > 0, self.val)\n\t\tif not all(values):\n\t\t\traise ValueError(\"Non-positive number encountered in log.\")\n\t\telse:\n\t\t\tval = np.array([np.math.log(v, base) for v in self.val])\n\t\t\tif len(self.der.shape):\n\t\t\t\tto_multiply = 1 / np.multiply(np.log(base), self.val)\n\t\t\t\tto_multiply = np.expand_dims(to_multiply, 1) if len(self.der.shape) > len(to_multiply.shape) else to_multiply\n\t\t\t\tder = np.multiply(to_multiply, self.der)\n\t\t\telse:\n\t\t\t\tder = None\n\t\treturn Var(val, der)", "def lg(x: Union[int, float]) -> float:\n res = 0.0\n try:\n res = log(x, 2)\n except ValueError:\n pass\n return res", "def log10(x):\n return 0.0", "def _signed_log(x, base):\n return numpy.sign(x) * numpy.log10(numpy.abs(x)) / numpy.log10(base)", "def log2(x: float) -> float:\n return math.log2(x) if x > 0 else 0", "def log_inplace(a):", "def _loglike(self, y, f):\n ll = y * tf.log(pos(f)) + (1 - y) * tf.log(pos(1 - f))\n return ll", "def logit_link(x):\n\n return 1 / (1 + math.exp(-0.05 * x))\n # return 1 / (1 + math.exp(-0.01 * x))", "def logtrapz(logy, x=None, dx=1.0):\n n_intvls = logy.shape[0]-1\n loghalf = log(.5)\n if x is not None:\n logdel = x[1:] - x[0:-1]\n else:\n logdel = ones(n_intvls)*dx\n logdel = log(logdel)\n lo = logy[0] + loghalf + logdel[0]\n hi = logy[-1] + loghalf + logdel[-1]\n lsum = logaddexp(lo, hi)\n 
for i in xrange(1,n_intvls):\n lsum = logaddexp(lsum, logy[i] + logdel[i])\n return lsum", "def log_prior(x):\n logp = (-0.5 * x.pow(2) - torch.tensor(2 * math.pi).sqrt().log()).sum(dim=1)\n return logp", "def _logsumexp(x):\n # Search maximum.\n max_x = None\n length = len(x)\n for i in range(length):\n if max_x is None or x[i] > max_x:\n max_x = x[i]\n\n # Calculate sum of exponential differences.\n sum_exp = 0\n for i in range(length):\n diff = x[i] - max_x\n sum_exp += np.exp(diff)\n\n log_sum_exp = max_x + np.log(sum_exp)\n\n return log_sum_exp", "def log2_inplace(a):", "def log(base, real):\n return math.log(real, base)" ]
[ "0.72744215", "0.7240807", "0.7214393", "0.72042274", "0.716658", "0.711701", "0.6767486", "0.6693635", "0.6662013", "0.6657874", "0.66419184", "0.66316617", "0.66109407", "0.6556733", "0.6541151", "0.65251803", "0.6519674", "0.6498009", "0.645293", "0.6449499", "0.64343315", "0.64312285", "0.64077866", "0.6386402", "0.63583314", "0.63548434", "0.63365805", "0.63337666", "0.63309705", "0.6303123" ]
0.8147607
0
Create a 3D rotation matrix for rotation about the x-axis.

               (1     0       0     )
    R(theta) = (0     cos(x)  sin(x))
               (0    -sin(x)  cos(x))
def rotation3Dx(theta):
    rmat = np.zeros((3,3))
    rmat[0,0], rmat[0,1], rmat[0,2] = 1.0, 0.0, 0.0
    rmat[1,0], rmat[1,1], rmat[1,2] = 0.0, np.cos(theta), np.sin(theta)
    rmat[2,0], rmat[2,1], rmat[2,2] = 0.0, -np.sin(theta), np.cos(theta)

    return rmat
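A small sign-convention check, assuming NumPy: with this matrix, a +90 degree rotation maps the y unit vector to -z.

import numpy as np

R = rotation3Dx(np.pi / 2)
print(np.round(R @ np.array([0.0, 1.0, 0.0]), 6))   # -> [ 0.  0. -1.]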
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matrix_rotate_3d_x(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_x = -deg * pi/180\n c_x = cos(rad_x)\n s_x = sin(rad_x)\n return np.matrix([[1, 0, 0], [0, c_x, -s_x], [0, s_x, c_x]])", "def rotation3D_x(angle: float) -> np.array:\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[1.0, 0.0, 0.0], [0.0, c, -s], [0.0, s, c]])", "def rotation_matrix3(angle_x=0, angle_y=0, angle_z=0):\n if angle_x != 0:\n c, s = cos(angle_x), sin(angle_x)\n r = np.array([[1, 0, 0], [0, c, -s], [0, s, c]])\n else:\n r = np.identity(3)\n\n if angle_y != 0:\n c, s = cos(angle_y), sin(angle_y)\n r = r.dot(np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]]))\n\n if angle_z != 0:\n c, s = cos(angle_z), sin(angle_z)\n r = r.dot(np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]]))\n\n return r", "def rotation3Dz(theta):\n rmat = np.zeros((3,3))\n rmat[0,0] = rmat[1,1] = np.cos(theta)\n rmat[0,1] = np.sin(theta)\n rmat[1,0] = -rmat[0,1]\n rmat[2,2] = 1\n return rmat", "def rotation_matrix_3x3_axis(angle, axis):\n assert axis.lower() in ['x','y','z']\n assert -180.0 <= angle <= 180.0\n angle_r = angle * (np.pi / 180.0)\n sa = np.sin(angle_r)\n ca = np.cos(angle_r)\n\n if axis == 'x':\n R = np.array([ [1, 0, 0],\n [0, ca, -sa],\n [0, sa, ca],\n ])\n elif axis == 'y':\n R = np.array([ [ca, 0, sa],\n [0, 1, 0],\n [-sa, 0, ca],\n ])\n elif axis == 'z':\n R = np.array([ [ca, -sa, 0],\n [sa, ca, 0],\n [0, 0, 1],\n ])\n return R", "def rotation_matrix3(axis, theta):\n R = np.eye(3)\n c = math.cos(theta)\n s = math.sin(theta)\n a1 = (axis + 1) % 3\n a2 = (axis + 2) % 3\n R[a1, a1] = c\n R[a1, a2] = -s\n R[a2, a1] = s\n R[a2, a2] = c\n return np.matrix(R)", "def rotation_mat(self) -> np.ndarray:\n rot = np.zeros((3, 3))\n\n txx = 2 * self.x * self.x\n tyy = 2 * self.y * self.y\n tzz = 2 * self.z * self.z\n twx = 2 * self.w * self.x\n twy = 2 * self.w * self.y\n twz = 2 * self.w * self.z\n txy = 2 * self.x * self.y\n txz = 2 * self.x * self.z\n tyz = 2 * self.y * self.z\n\n rot[0, 0] = 1. - tyy - tzz\n rot[0, 1] = txy - twz\n rot[0, 2] = txz + twy\n rot[1, 0] = txy + twz\n rot[1, 1] = 1. - txx - tzz\n rot[1, 2] = tyz - twx\n rot[2, 0] = txz - twy\n rot[2, 1] = tyz + twx\n rot[2, 2] = 1. 
- txx - tyy\n\n return rot", "def rotate3(x, angle_x=0, angle_y=0, angle_z=0, origin=(0, 0, 0)):\n origin = np.asarray(origin)\n x = np.asarray(x) - origin\n r = rotation_matrix3(angle_x, angle_y, angle_z)\n return x.dot(r.T) + origin", "def rotate(self, x=0, y=0, z=0):\n\t\tquaternion = R.from_euler('xyz', [x, y, z], degrees=True)\n\t\trotation_matrix = np.array(quaternion.as_matrix())\n\t\trotation_matrix = np.pad(rotation_matrix, [(0, 1), (0, 1)], mode='constant')\n\t\trotation_matrix[3,3] = 1\n\n\t\tself.matrix = np.matmul(self.matrix, rotation_matrix)", "def create_rotation_matrix_3d(angles) -> np.array:\n\n mat1 = np.array([[1., 0., 0.],\n [0., math.cos(angles[0]), math.sin(angles[0])],\n [0., -math.sin(angles[0]), math.cos(angles[0])]],\n dtype='float')\n\n mat2 = np.array([[math.cos(angles[1]), 0., -math.sin(angles[1])],\n [0., 1., 0.],\n [math.sin(angles[1]), 0., math.cos(angles[1])]],\n dtype='float')\n\n mat3 = np.array([[math.cos(angles[2]), math.sin(angles[2]), 0.],\n [-math.sin(angles[2]), math.cos(angles[2]), 0.],\n [0., 0., 1.]],\n dtype='float')\n\n mat = (mat1 @ mat2) @ mat3\n return mat", "def matrix_rotate_3d_z(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_z = -deg * pi/180\n c_z = cos(rad_z)\n s_z = sin(rad_z)\n return np.matrix([[c_z, -s_z, 0], [s_z, c_z, 0], [0, 0, 1]])", "def get_3drotation_matrix(axis, angle):\n angle = angle #*-1\n norm = np.linalg.norm(np.array(axis))\n if norm > 0:\n axis /= norm\n ax, ay, az = axis[0], axis[1], axis[2]\n cos, sin = np.cos(angle), np.sin(angle)\n rotmat = np.array([[cos + ax * ax * (1 - cos), ax * ay * (1 - cos) - az * sin, ax * az * (1 - cos) + ay * sin],\n [ay * ax * (1 - cos) + az * sin, cos + ay * ay * (1 - cos), ay * az * (1 - cos) - ax * sin],\n [az * ax * (1 - cos) - ay * sin, az * ay * (1 - cos) + ax * sin, cos + az * az * (1 - cos)]])\n return rotmat", "def x_rotmat(theta):\n cos_t = np.cos(theta)\n sin_t = np.sin(theta)\n return np.array([[1, 0, 0],\n [0, cos_t, -sin_t],\n [0, sin_t, cos_t]])", "def rotation_matrix( axis, angle ):\n\n # Trig factors.\n ca = cos(angle)\n sa = sin(angle)\n C = 1 - ca\n\n # Depack the axis.\n x, y, z = tuple( axis )\n\n # Multiplications (to remove duplicate calculations).\n xs = x*sa\n ys = y*sa\n zs = z*sa\n xC = x*C\n yC = y*C\n zC = z*C\n xyC = x*yC\n yzC = y*zC\n zxC = z*xC\n\n # Update the rotation matrix.\n matrix \t = np.zeros( (3,3) )\n matrix[0, 0] = x*xC + ca\n matrix[0, 1] = xyC - zs\n matrix[0, 2] = zxC + ys\n matrix[1, 0] = xyC + zs\n matrix[1, 1] = y*yC + ca\n matrix[1, 2] = yzC - xs\n matrix[2, 0] = zxC - ys\n matrix[2, 1] = yzC + xs\n matrix[2, 2] = z*zC + ca\n return matrix", "def rotation3D_z(angle: float) -> np.array:\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])", "def rotation(self, angle, axis):\r\n\r\n sqr_a = axis.x*axis.x\r\n sqr_b = axis.y*axis.y\r\n sqr_c = axis.z*axis.z\r\n len2 = sqr_a+sqr_b+sqr_c\r\n\r\n k2 = math.cos(angle)\r\n k1 = (1.0-k2)/len2\r\n k3 = math.sin(angle)/math.sqrt(len2)\r\n k1ab = k1*axis.x*axis.y\r\n k1ac = k1*axis.x*axis.z\r\n k1bc = k1*axis.y*axis.z\r\n k3a = k3*axis.x\r\n k3b = k3*axis.y\r\n k3c = k3*axis.z\r\n\r\n return mat4( k1*sqr_a+k2, k1ab-k3c, k1ac+k3b, 0.0,\r\n k1ab+k3c, k1*sqr_b+k2, k1bc-k3a, 0.0,\r\n k1ac-k3b, k1bc+k3a, k1*sqr_c+k2, 0.0,\r\n 0.0, 0.0, 0.0, 1.0)", "def generate_rotation_matrix(x_angle, y_angle, z_angle):\n return np.array([\n [1, 0, 0],\n [0, np.cos(x_angle), -np.sin(x_angle)],\n [0, np.sin(x_angle), np.cos(x_angle)],\n ]).dot([\n 
[np.cos(y_angle), 0, np.sin(y_angle)],\n [0, 1, 0],\n [-np.sin(y_angle), 0, np.cos(y_angle)],\n ]).dot([\n [np.cos(z_angle), -np.sin(z_angle), 0],\n [np.sin(z_angle), np.cos(z_angle), 0],\n [0, 0, 1],\n ]).tolist()", "def random_rotation_matrix():\n\n x = np.random.uniform(size=3)\n theta = x[0]*2*math.pi\n phi = x[1]*2*math.pi\n z = x[2]*2\n\n r = math.sqrt(z)\n vx = math.sin(phi)*r\n vy = math.cos(phi)*r\n vz = math.sqrt(2.0-z)\n\n st = math.sin(theta)\n ct = math.cos(theta)\n\n sx = vx*ct-vy*st\n sy = vx*st+vy*ct\n\n return np.array([[vx*sx-ct, vx*sy-st, vx*vz],\n [vy*sx+st, vy*sy-ct, vy*vz],\n [vz*sx,vz*sy,1.0-z]])", "def RotationMatrix(theta, x, y, z, point=None):\n\treturn mach.rotation_matrix(theta, [x, y, z])", "def zx_rotation(vector,theta):\r\n R = np.array([[np.cos(theta),0,np.sin(theta)],\r\n [0,1,0],\r\n [-np.sin(theta),0,np.cos(theta)]\r\n ])\r\n return np.dot(R,vector)", "def _rot(axis, angle):\n if axis == 1:\n return Matrix([[1, 0, 0],\n [0, cos(angle), -sin(angle)],\n [0, sin(angle), cos(angle)]])\n elif axis == 2:\n return Matrix([[cos(angle), 0, sin(angle)],\n [0, 1, 0],\n [-sin(angle), 0, cos(angle)]])\n elif axis == 3:\n return Matrix([[cos(angle), -sin(angle), 0],\n [sin(angle), cos(angle), 0],\n [0, 0, 1]])", "def rotation_matrix_xyz(axis, angle, angle_dim):\n assert angle_dim is \"deg\" or angle_dim is \"rad\"\n assert axis is \"x\" or axis is \"y\" or axis is \"z\"\n x = 0\n y = 0\n z = 0\n\n if angle_dim is \"deg\":\n a = np.deg2rad(angle)\n else:\n a = angle\n\n if axis is \"x\":\n x = 1\n y = 0\n z = 0\n if axis is \"y\":\n x = 0\n y = 1\n z = 0\n if axis is \"z\":\n x = 0\n y = 0\n z = 1\n\n s = np.sin(a)\n c = np.cos(a)\n rotation_matrix = np.array([[c + x ** 2 * (1 - c), x * y * (1 - c) - z * s, x * z * (1 - c) + y * s],\n [y * x * (1 - c) + z * s, c + y ** 2 * (1 - c), y * z * (1 - c) - x * s],\n [z * x * (1 - c) - y * s, z * y * (1 - c) + x * s, c + z ** 2 * (1 - c)]])\n\n return rotation_matrix", "def rotationMatrix(self):\n\n R = Compute3DRotationMatrix(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n self.exteriorOrientationParameters[5])\n\n return R", "def rotation_matrix(angle, axis):\n about_z = rotation_about_z(angle)\n z_to_axis = z_to_vector(axis)\n axis_to_z = np.linalg.inv(z_to_axis)\n return reduce(np.dot, [z_to_axis, about_z, axis_to_z])", "def RotationX(theta):\n\n return Rotation([1., 0., 0.], theta)", "def rot_x(theta):\n theta_rad = np.radians(theta)\n rotation_matrix = [[1, 0, 0],\n [0, np.cos(theta_rad), -np.sin(theta_rad)],\n [0, np.sin(theta_rad), np.cos(theta_rad)]]\n return np.matrix(rotation_matrix)", "def rotateAlignXform(self):\n r = np.concatenate(([self.x], [self.y], [self.z]), 0)\n r = np.concatenate((r, np.array([[0,0,0]])), 0)\n r = np.concatenate((r, np.array([0,0,0,1]).reshape(-1,1)), 1)\n return r", "def rotation3Dy(theta):\n rmat = np.zeros((3,3))\n rmat[0,0], rmat[0,1], rmat[0,2] = np.cos(theta), 0.0, -np.sin(theta)\n rmat[1,0], rmat[1,1], rmat[1,2] = 0.0, 1.0, 0.0\n rmat[2,0], rmat[2,1], rmat[2,2] = np.sin(theta), 0.0, np.cos(theta)\n\n return rmat", "def rotateX(self, angle):\r\n rad = angle * math.pi / 180\r\n cosa = math.cos(rad)\r\n sina = math.sin(rad)\r\n y = self.y * cosa - self.z * sina\r\n z = self.y * sina + self.z * cosa\r\n return Point3D(self.x, y, z)", "def so3_matrix_generator(axis, theta):\n theta = np.asarray(theta)\n\n theta = theta[:, None, None]\n x, y, z = axis.T\n zero = np.zeros_like(x)\n k = np.stack([zero, -z, y, z, zero, -x, -y, x, zero], 1).reshape((-1, 3, 3))\n 
rot = np.eye(3)[None] + np.sin(theta) * k + (1 - np.cos(theta)) * k @ k\n\n return rot" ]
[ "0.80429703", "0.77990216", "0.77074045", "0.76701725", "0.7432882", "0.7401681", "0.73251915", "0.7267913", "0.71938413", "0.71556383", "0.70411855", "0.7036028", "0.70312065", "0.7028589", "0.7012928", "0.69417447", "0.692393", "0.68872285", "0.6844814", "0.6838681", "0.68080956", "0.6798901", "0.67988205", "0.6758144", "0.6729803", "0.67192096", "0.66868836", "0.66717553", "0.6632204", "0.6627981" ]
0.78315115
1
Create a 3D rotation matrix for rotation about the z-axis.

               ( cos(x)  sin(x)  0)
    R(theta) = (-sin(x)  cos(x)  0)
               ( 0       0       1)
def rotation3Dz(theta):
    rmat = np.zeros((3,3))
    rmat[0,0] = rmat[1,1] = np.cos(theta)
    rmat[0,1] = np.sin(theta)
    rmat[1,0] = -rmat[0,1]
    rmat[2,2] = 1
    return rmat
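An analogous check, assuming NumPy: under the same convention, a +90 degree rotation about z maps the x unit vector to -y.

import numpy as np

R = rotation3Dz(np.pi / 2)
print(np.round(R @ np.array([1.0, 0.0, 0.0]), 6))   # -> [ 0. -1.  0.]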
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matrix_rotate_3d_z(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_z = -deg * pi/180\n c_z = cos(rad_z)\n s_z = sin(rad_z)\n return np.matrix([[c_z, -s_z, 0], [s_z, c_z, 0], [0, 0, 1]])", "def rotation3D_z(angle: float) -> np.array:\n c = np.cos(angle)\n s = np.sin(angle)\n return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])", "def rotation_matrix3(angle_x=0, angle_y=0, angle_z=0):\n if angle_x != 0:\n c, s = cos(angle_x), sin(angle_x)\n r = np.array([[1, 0, 0], [0, c, -s], [0, s, c]])\n else:\n r = np.identity(3)\n\n if angle_y != 0:\n c, s = cos(angle_y), sin(angle_y)\n r = r.dot(np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]]))\n\n if angle_z != 0:\n c, s = cos(angle_z), sin(angle_z)\n r = r.dot(np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]]))\n\n return r", "def z_rotmat(theta):\n cos_t = np.cos(theta)\n sin_t = np.sin(theta)\n return np.array([[cos_t, -sin_t, 0],\n [sin_t, cos_t, 0],\n [0, 0, 1]])", "def rot_z(theta):\n theta_rad = np.radians(theta)\n rotation_matrix = [[np.cos(theta_rad), -np.sin(theta_rad), 0],\n [np.sin(theta_rad), np.cos(theta_rad), 0],\n [0, 0, 1]]\n return np.matrix(rotation_matrix)", "def rotation_mat(self) -> np.ndarray:\n rot = np.zeros((3, 3))\n\n txx = 2 * self.x * self.x\n tyy = 2 * self.y * self.y\n tzz = 2 * self.z * self.z\n twx = 2 * self.w * self.x\n twy = 2 * self.w * self.y\n twz = 2 * self.w * self.z\n txy = 2 * self.x * self.y\n txz = 2 * self.x * self.z\n tyz = 2 * self.y * self.z\n\n rot[0, 0] = 1. - tyy - tzz\n rot[0, 1] = txy - twz\n rot[0, 2] = txz + twy\n rot[1, 0] = txy + twz\n rot[1, 1] = 1. - txx - tzz\n rot[1, 2] = tyz - twx\n rot[2, 0] = txz - twy\n rot[2, 1] = tyz + twx\n rot[2, 2] = 1. - txx - tyy\n\n return rot", "def rotate_z(angle):\n log.dev(\"lib.mathp.rotate_z is deprecated. 
Use lib.rotation.R3 instead.\")\n\n cosA = np.cos(angle)\n sinA = np.sin(angle)\n R = np.array([[cosA, sinA, 0], [-sinA, cosA, 0], [0, 0, 1]])\n return R", "def rot_z(angle):\n sangle = math.sin(angle)\n cangle = math.cos(angle)\n rz = np.array([[cangle, sangle, 0.0],\n [-sangle, cangle, 0.0],\n [0.0, 0.0, 1.0]])\n return rz", "def create_rotation_matrix_3d(angles) -> np.array:\n\n mat1 = np.array([[1., 0., 0.],\n [0., math.cos(angles[0]), math.sin(angles[0])],\n [0., -math.sin(angles[0]), math.cos(angles[0])]],\n dtype='float')\n\n mat2 = np.array([[math.cos(angles[1]), 0., -math.sin(angles[1])],\n [0., 1., 0.],\n [math.sin(angles[1]), 0., math.cos(angles[1])]],\n dtype='float')\n\n mat3 = np.array([[math.cos(angles[2]), math.sin(angles[2]), 0.],\n [-math.sin(angles[2]), math.cos(angles[2]), 0.],\n [0., 0., 1.]],\n dtype='float')\n\n mat = (mat1 @ mat2) @ mat3\n return mat", "def rotation3Dx(theta):\n rmat = np.zeros((3,3))\n rmat[0,0], rmat[0,1], rmat[0,2] = 1.0, 0.0, 0.0\n rmat[1,0], rmat[1,1], rmat[1,2] = 0.0, np.cos(theta), np.sin(theta)\n rmat[2,0], rmat[2,1], rmat[2,2] = 0.0, -np.sin(theta), np.cos(theta)\n \n return rmat", "def matrix_rotate_3d_x(deg: float) -> np.matrix:\n from numpy import cos, sin, pi\n rad_x = -deg * pi/180\n c_x = cos(rad_x)\n s_x = sin(rad_x)\n return np.matrix([[1, 0, 0], [0, c_x, -s_x], [0, s_x, c_x]])", "def random_rotation_matrix():\n\n x = np.random.uniform(size=3)\n theta = x[0]*2*math.pi\n phi = x[1]*2*math.pi\n z = x[2]*2\n\n r = math.sqrt(z)\n vx = math.sin(phi)*r\n vy = math.cos(phi)*r\n vz = math.sqrt(2.0-z)\n\n st = math.sin(theta)\n ct = math.cos(theta)\n\n sx = vx*ct-vy*st\n sy = vx*st+vy*ct\n\n return np.array([[vx*sx-ct, vx*sy-st, vx*vz],\n [vy*sx+st, vy*sy-ct, vy*vz],\n [vz*sx,vz*sy,1.0-z]])", "def rotate_z(self, angle):\n angle *= np.pi / 180\n return self.transform(np.matrix([[np.cos(angle), -np.sin(angle), 0],\n [np.sin(angle), np.cos(angle), 0],\n [0, 0, 1]]))", "def generate_rotation_matrix(x_angle, y_angle, z_angle):\n return np.array([\n [1, 0, 0],\n [0, np.cos(x_angle), -np.sin(x_angle)],\n [0, np.sin(x_angle), np.cos(x_angle)],\n ]).dot([\n [np.cos(y_angle), 0, np.sin(y_angle)],\n [0, 1, 0],\n [-np.sin(y_angle), 0, np.cos(y_angle)],\n ]).dot([\n [np.cos(z_angle), -np.sin(z_angle), 0],\n [np.sin(z_angle), np.cos(z_angle), 0],\n [0, 0, 1],\n ]).tolist()", "def rotation_matrix3(axis, theta):\n R = np.eye(3)\n c = math.cos(theta)\n s = math.sin(theta)\n a1 = (axis + 1) % 3\n a2 = (axis + 2) % 3\n R[a1, a1] = c\n R[a1, a2] = -s\n R[a2, a1] = s\n R[a2, a2] = c\n return np.matrix(R)", "def rotation3(size=None): # noqa\n if size is None:\n size = ()\n else:\n try:\n size = tuple(size)\n except TypeError:\n size = (size,)\n theta, phi, z = 2. * random((3, 1) + size)\n theta *= pi # Initial rotation angle about z-axis.\n phi *= pi # Angle in xy plane for tilt of z-axis.\n # Magnitude of tilt is random variable z.\n r = sqrt(z)\n v = concatenate((r*sin(phi), r*cos(phi), sqrt(2.-z)))\n st, ct = sin(theta), cos(theta)\n s = concatenate((v[0]*ct - v[1]*st, v[0]*st + v[1]*ct))\n m = v[:, newaxis].repeat(3, axis=1)\n m[:, :2] *= s\n m[0, :2] -= concatenate((ct, st))\n m[1, :2] += concatenate((st, -ct))\n m[:2, 2] *= v[2]\n m[2, 2] = 1. 
- z # Equals v[2]*v[2] - 1.\n if m.ndim > 2:\n m = transpose(m, roll(range(m.ndim), -2)).copy()\n return m", "def transform3D(x: float, y: float, z: float, R: np.array) -> np.array:\n T = np.zeros((4, 4))\n T[:3, :3] = R\n T[:, 3] = [x, y, z, 1.0]\n\n return T", "def rotation_matrix_3x3_axis(angle, axis):\n assert axis.lower() in ['x','y','z']\n assert -180.0 <= angle <= 180.0\n angle_r = angle * (np.pi / 180.0)\n sa = np.sin(angle_r)\n ca = np.cos(angle_r)\n\n if axis == 'x':\n R = np.array([ [1, 0, 0],\n [0, ca, -sa],\n [0, sa, ca],\n ])\n elif axis == 'y':\n R = np.array([ [ca, 0, sa],\n [0, 1, 0],\n [-sa, 0, ca],\n ])\n elif axis == 'z':\n R = np.array([ [ca, -sa, 0],\n [sa, ca, 0],\n [0, 0, 1],\n ])\n return R", "def RotationMatrix(theta, x, y, z, point=None):\n\treturn mach.rotation_matrix(theta, [x, y, z])", "def rotation_matrix(rx, ry, rz):\n # Convert from degrees to radians.\n rx = np.pi * rx / 180\n ry = np.pi * ry / 180\n rz = np.pi * rz / 180\n\n # Pre-compute sine and cosine of angles.\n cx, cy, cz = np.cos([rx, ry, rz])\n sx, sy, sz = np.sin([rx, ry, rz])\n\n # Set up euler rotations.\n Rx = np.array([[1, 0, 0, 0],\n [0, cx, -sx, 0],\n [0, sx, cx, 0],\n [0, 0, 0, 1]])\n\n Ry = np.array([[cy, 0, sy, 0],\n [0, 1, 0, 0],\n [-sy, 0, cy, 0],\n [0, 0, 0, 1]])\n\n Rz = np.array([[cz, -sz, 0, 0],\n [sz, cz, 0, 0],\n [0, 0, 1, 0],\n [0, 0, 0, 1]])\n\n return Rz.dot(Ry.dot(Rx))", "def rotateZ(self, angle):\r\n rad = angle * math.pi / 180\r\n cosa = math.cos(rad)\r\n sina = math.sin(rad)\r\n x = self.x * cosa - self.y * sina\r\n y = self.x * sina + self.y * cosa\r\n return Point3D(x, y, self.z)", "def rotateZ(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def rotateZ(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def rotateZ(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def rotateZ(self, angle):\n rad = angle * math.pi / 180\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def rotateZ(self, angle):\n rad = math.radians(angle)\n cosa = math.cos(rad)\n sina = math.sin(rad)\n x = self.x * cosa - self.y * sina\n y = self.x * sina + self.y * cosa\n return Point3D(x, y, self.z)", "def RotationZ(theta):\n\n return Rotation([0., 0., 1.], theta)", "def rotation(x,y,z):\r\n phi = np.arctan(z/sqrt(x**2+y**2))\r\n lamb = np.arctan2(y,x)\r\n G = np.array([[-sin(lamb), cos(lamb), 0],\r\n [-sin(phi) * cos(lamb), -sin(phi) * sin(lamb), cos(phi)],\r\n [cos(phi) * cos(lamb), cos(phi) * sin(lamb), sin(phi)]])\r\n return (G)", "def get_3drotation_matrix(axis, angle):\n angle = angle #*-1\n norm = np.linalg.norm(np.array(axis))\n if norm > 0:\n axis /= norm\n ax, ay, az = axis[0], axis[1], axis[2]\n cos, sin = np.cos(angle), np.sin(angle)\n rotmat = np.array([[cos + ax * ax * (1 - cos), ax * ay * (1 - cos) - az * sin, ax * az * (1 - cos) + ay * sin],\n [ay * ax * (1 - cos) + az * sin, cos + ay * ay * (1 - cos), ay * az * (1 - cos) - ax * sin],\n [az * ax * (1 - cos) - ay * sin, az * ay * (1 - cos) + ax * sin, cos + az * az * (1 - cos)]])\n return rotmat", "def 
rotationMatrix_RzRyRz(self):\n\n R = Compute3DRotationMatrix_RzRyRz(self.exteriorOrientationParameters[3], self.exteriorOrientationParameters[4],\n self.exteriorOrientationParameters[5])\n\n return R" ]
[ "0.8186974", "0.802003", "0.7951418", "0.76159006", "0.75433534", "0.74739504", "0.7473712", "0.7445926", "0.7439543", "0.74234265", "0.736115", "0.7298137", "0.7291585", "0.72050965", "0.7194949", "0.7179259", "0.71736616", "0.71396255", "0.71220154", "0.71090776", "0.7098334", "0.70957506", "0.70957506", "0.70957506", "0.70957506", "0.7043044", "0.7041205", "0.70372653", "0.7017589", "0.7012499" ]
0.84704345
0
Compute the geodesic distance on the sphere between two sets of points. The points are assumed to lie on the surface of the same sphere.
def spherical_distances(x, y):
    # Compute the norms of all points, we do NOT check they actually all lie on
    # the same sphere (that's the caller's responsibility).
    xn = np.sqrt((x**2).sum(axis=1))
    yn = np.sqrt((y**2).sum(axis=1))
    ang_cos = np.dot(x, y.T)/(xn[:, None]*yn[None, :])
    # Protect against numerical noise giving us cosine values outside the -1,1
    # range, where arccos would return nans.
    ang_cos = np.clip(ang_cos, -1, 1)

    return xn[:, None]*np.arccos(ang_cos)
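A minimal sketch on the unit sphere, assuming NumPy: orthogonal unit vectors are separated by a geodesic distance of pi/2.

import numpy as np

x = np.array([[1.0, 0.0, 0.0],
              [0.0, 1.0, 0.0]])
print(spherical_distances(x, x))   # approximately [[0, pi/2], [pi/2, 0]]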
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def distance_sphere(self, other):\n if not self.crs == getattr(other, \"crs\", \"EPSG:4326\") == \"EPSG:4326\":\n raise ValueError(\"Only can calculate spherical distance with 'EPSG:4326' crs.\")\n return _binary_op(arctern.ST_DistanceSphere, self, other)", "def distance_on_sphere(lat1, long1, lat2, long2):\n degrees_to_radians = math.pi/180.0\n \n # phi = 90 - latitude\n phi1 = (90.0 - float(lat1))*degrees_to_radians\n phi2 = (90.0 - float(lat2))*degrees_to_radians\n \n # theta = longitude\n theta1 = float(long1)*degrees_to_radians\n theta2 = float(long2)*degrees_to_radians\n \n # Compute spherical distance from spherical coordinates.\n \n # For two locations in spherical coordinates\n # (1, theta, phi) and (1, theta', phi')\n # cosine( arc length ) =\n # sin phi sin phi' cos(theta-theta') + cos phi cos phi'\n # distance = rho * arc length\n \n cos = (math.sin(phi1)*math.sin(phi2)*math.cos(theta1 - theta2) +\n math.cos(phi1)*math.cos(phi2))\n arc = math.acos( cos )\n \n # Remember to multiply arc by the radius of the earth\n # in your favorite set of units to get length.\n return round(arc * 6373 / 10 * 60)", "def spherical_distance(lat1, lon1, lat2, lon2):\n # convert decimal degrees to radians\n lon1, lat1, lon2, lat2 = map(radians, [lon1, lat1, lon2, lat2])\n # haversine formula\n dlon = lon2 - lon1\n dlat = lat2 - lat1\n a = sin(dlat/2)**2 + cos(lat1) * cos(lat2) * sin(dlon/2)**2\n c = 2 * asin(sqrt(a))\n km = 6373 * c\n km = '%d' % km\n return float(km)", "def calculateDistanceBetweenPoints(lat1,lon1,lat2,lon2):\n\treturn Geodesic.WGS84.Inverse(lat1,lon1, lat2, lon2)['s12']", "def geodesic_distance(coord1, coord2):\n # convert coordinates to radians\n s = math.pi * np.squeeze(np.array(coord1)) / 180\n f = math.pi * np.squeeze(np.array(coord2)) / 180\n\n delta = (f - s)/2\n t = math.cos(f[0]) * math.cos(s[0]) * math.sin(delta[1])**2 + math.sin(delta[0])**2\n\n return earth_radius() * 2 * math.atan2(t**(1/2),(1-t)**(1/2))", "def distance_from_sphere(self, points, params, sqrt=False):\n center, radius = params\n center = center.reshape((1, 3))\n distance = (torch.norm(points - center, p=2, dim=1) - radius) ** 2\n if sqrt:\n distance = guard_sqrt(distance)\n\n if self.reduce:\n distance = torch.mean(distance)\n return distance", "def calculate_distance(point1, point2):\n import math\n\n def convert_to_radians(degrees):\n return degrees * math.pi / 180\n\n radius_earth = 6.371E3 # km\n phi1 = convert_to_radians(point1[0])\n phi2 = convert_to_radians(point2[0])\n delta_phi = convert_to_radians(point1[0] - point2[0])\n delta_lam = convert_to_radians(point1[1] - point2[1])\n\n\n a = math.sin(0.5 * delta_phi)**2 + math.cos(phi1) * math.cos(phi2) * math.sin(0.5 * delta_lam)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n return radius_earth * c / 1.60934 # convert km to miles", "def distance(lat1, lon1, lat2, lon2):\r\n radius = 6373 * 1000\r\n dlon = lon2 - lon1\r\n dlat = lat2 - lat1\r\n a = (math.sin(dlat/2))**2 + math.cos(lat1) * math.cos(lat2) * (math.sin(dlon/2))**2\r\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\r\n return radius * c", "def distance(lat1, lon1, lat2, lon2):\r\n earth_radius=3959.0 #miles\r\n if lat1==lat2 and lon1==lon2:\r\n dst=0\r\n else:\r\n dst = acos(\r\n (sin(radians(lat1)) * sin(radians(lat2))) +\r\n (cos(radians(lat1)) * cos(radians(lat2)) * cos(radians(lon1) - radians(lon2)))\r\n ) * earth_radius\r\n return dst", "def calculate_distance(point1, point2):\n import math\n\n def convert_to_radians(degrees):\n return degrees * math.pi / 180\n\n 
radius_earth = 6.371E3 # km\n phi1 = convert_to_radians(point1[0])\n phi2 = convert_to_radians(point2[0])\n\n delta_phi = convert_to_radians(point1[0] - point2[0])\n delta_lam = convert_to_radians(point1[1] - point2[1])\n\n a = math.sin(0.5 * delta_phi)**2 + math.cos(phi1) * math.cos(phi2) * math.sin(0.5 * delta_lam)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n return radius_earth * c / 1.60934 # convert km to miles", "def gpx_distance(lat1, lon1, lat2, lon2):\n theta = lon1 - lon2\n rads = sin(radians(lat1)) * sin(radians(lat2)) + cos(radians(lat1)) * cos(radians(lat2)) * cos(radians(theta))\n\n # make sure rads is [-1, 1]\n rads = 1 if rads > 1 else rads\n rads = -1 if rads < -1 else rads\n\n rads = acos(rads)\n\n # multiply by radius of the earth to get distance\n return rads * 6367", "def dist_between_spheres(r1, r2, Y, C):\n h = C - Y\n \n d1 = np.sqrt(r1**2 - h**2)\n d2 = np.sqrt(r2**2 - h**2)\n\n dist = r1 - d1 + r2 - d2\n \n return dist", "def get_spherical_distance(lat1,lat2,long1,long2):\n lat1,lat2,long1,long2= float(lat1),float(lat2),float(long1),float(long2)\n q=radians(lat2-lat1)\n r=radians(long2-long1)\n lat2r=radians(lat2)\n lat1r=radians(lat1)\n a=sin(q/2)*sin(q/2)+cos(lat1r)*cos(lat2r)*sin(r/2)*sin(r/2)\n c=2*atan2(sqrt(a),sqrt(1-a))\n R=6371*1000\n d=R*c\n return d", "def calculate_distance(point1, point2):\n\n def convert_to_radians(degrees):\n return degrees * math.pi / 180\n\n radius_earth = 6.371E3 # km\n phi1 = convert_to_radians(point1[0])\n phi2 = convert_to_radians(point2[0])\n delta_phi = convert_to_radians(point1[0] - point2[0])\n delta_lam = convert_to_radians(point1[1] - point2[1])\n\n a = math.sin(0.5 * delta_phi)**2 + math.cos(phi1) * math.cos(phi2) * math.sin(0.5 * delta_lam)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n return radius_earth * c / 1.60934 # convert km to miles", "def distance_to(self, other):\n if type(other) == GeoPoint:\n other = other.to_cartesian()\n d0 = self.x - other.x\n d1 = self.y - other.y\n d2 = self.z - other.z\n\n return math.sqrt(d0 * d0 + d1 * d1 + d2 * d2)", "def getDistance(point1, point2):\n\n \"\"\"Convert in radians\"\"\"\n lat1 = radians(point1.getLatitude())\n lon1 = radians(point1.getLongitude())\n lat2 = radians(point2.getLatitude())\n lon2 = radians(point2.getLongitude())\n d_lon = lon2 - lon1\n d_lat = lat2 - lat1\n\n \"\"\"Approximate radius of earth in km\"\"\"\n R = 6373.0\n\n \"\"\"Apply the formula\"\"\"\n a = sin(d_lat / 2)**2 + cos(lat1) * cos(lat2) * sin(d_lon / 2)**2\n c = 2 * atan2(sqrt(a), sqrt(1 - a))\n\n \"\"\"Get the distance between point1 and point2\"\"\"\n distance = R * c\n\n return distance", "def distance(latitude_1: float, longitude_1: float, latitude_2: float, longitude_2: float) -> float:\n lat1, lon1, lat2, lon2 = map(radians, (latitude_1, longitude_1, latitude_2, longitude_2))\n return (\n 2\n * EARTH_RADIUS\n * asin(\n sqrt(\n sin((lat2 - lat1) / 2) ** 2 + cos(lat1) * cos(lat2) * (sin((lon2 - lon1) / 2) ** 2)\n )\n )\n )", "def get_spherical_distance(lat1,lat2,long1,long2):\n q=radians(lat2-lat1)\n r=radians(long2-long1)\n lat2r=radians(lat2)\n lat1r=radians(lat1)\n a=sin(q/2)*sin(q/2)+cos(lat1r)*cos(lat2r)*sin(r/2)*sin(r/2)\n c=2*atan2(sqrt(a),sqrt(1-a))\n R=6371*1000\n d=R*c\n return d", "def geodesicDistance(A, B = geolocate(\"Colosseo\")):\n # colosseo = (41.890183, 12.492369)\n return geopy.distance.vincenty(A, B).meters", "def distance(lat1, lon1, lat2, lon2):\n lon1, lat1 = math.radians(lon1), math.radians(lat1)\n lon2, lat2 = math.radians(lon2), 
math.radians(lat2)\n a = (math.sin((lat2 - lat1) / 2) ** 2 +\n math.cos(lat1) * math.cos(lat2) * math.sin((lon2 - lon1) / 2) ** 2)\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1 - a))\n d = 6371000 * c\n\n return d", "def distance(self, coord1, coord2):\n sinsin_lat = coord1.lat.sin() * coord2.lat.sin()\n coscos_lat = coord1.lat.cos() * coord2.lat.cos()\n cos_deltalong = coord1.delta_long(coord2).cos()\n\n angle = AngleDeg().acos(sinsin_lat + coscos_lat * cos_deltalong)\n\n return angle.dist_from_radius(EARTH_RADIUS)", "def get_distance(first: Point, second: Point) -> Float:\n\n return sqrt(\n (second.x - first.x) ** 2\n +\n (second.y - first.y) ** 2\n )", "def geo_distance(lat1,lon1,lat2,lon2):\n \n # radius of earth in km\n R=6373.0\n\n # pi\n pi=math.pi\n\n lat1=math.radians(lat1)\n lat2=math.radians(lat2)\n lon1=math.radians(lon1)\n lon2=math.radians(lon2)\n\n dlon=lon2 - lon1\n dlat=lat2 - lat1\n\n a=sin(dlat / 2)**2 + cos(lat1) * cos(lat2) * sin(dlon / 2)**2\n c=2 * atan2(sqrt(a), sqrt(1 - a))\n\n distance=R * c\n\n tc1=atan2(sin(lon2-lon1)*cos(lat2),\n cos(lat1)*sin(lat2)-sin(lat1)*cos(lat2)*cos(lon2-lon1))\n\n tc1=tc1 % (2*pi)\n\n bearing=math.degrees(tc1)\n\n return [distance,bearing]", "def get_distance(lat1, lon1, lat2, lon2) -> float:\n # Earth radius in meters\n radius = 6371000\n\n # Degress to radian\n lat1, lon1, lat2, lon2 = map(np.deg2rad, [lat1, lon1, lat2, lon2])\n\n # Deltas\n dlat = lat2 - lat1\n dlon = lon2 - lon1\n\n # Calculate distance\n arch = np.sin(dlat / 2) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2) ** 2\n arch_sin = 2 * np.arcsin(np.sqrt(arch))\n\n return radius * arch_sin", "def distance(self, coord1, coord2):\n return (abs(coord1.x - coord2.x) + abs(coord1.y - coord2.y) + abs(coord1.z - coord2.z))//2", "def get_distance(lat1, lon1, lat2, lon2):\n phi1 = math.radians(lat1)\n phi2 = math.radians(lat2)\n d_phi = math.radians(lat2 - lat1)\n d_lam = math.radians(lon2 - lon1)\n a = math.sin(d_phi/2) ** 2 + math.cos(phi1) * math.cos(phi2) * math.sin(d_lam/2)**2\n c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))\n return 6371000 * c", "def get_distance(p1, p2):\n\n deg_rad = math.pi / 180\n\n dphi = p1[1] - p2[1]\n phim = 0.5 * (p1[1] + p2[1])\n dlam = p1[0] - p2[0]\n\n k1 = (111.13209 - 0.56605 * math.cos(2 * phim * deg_rad) + 0.00120 * \n math.cos(4 * phim * deg_rad))\n k2 = (111.41513 * math.cos(phim * deg_rad) - 0.09455 * \n math.cos(3 *phim * deg_rad) + 0.0012 * math.cos(5 * phim * deg_rad))\n\n return numpy.sqrt(k1**2 * dphi**2 + k2**2 * dlam**2)", "def distance(a: Point, b: Point) -> float:\n return math.sqrt(math.pow(b.x - a.x, 2) + math.pow(b.y - a.y, 2))", "def distance_between(lat_1, lon_1, lat_2, lon_2):\n lat_1, lon_1 = math.radians(lat_1), math.radians(lon_1)\n lat_2, lon_2 = math.radians(lat_2), math.radians(lon_2)\n theta = lon_1 - lon_2\n dist = math.sin(lat_1)*math.sin(lat_2) + math.cos(lat_1)*math.cos(lat_2)*math.cos(theta)\n dist = math.acos(dist)\n dist = math.degrees(dist)\n dist = dist * 69.06 # 69.09 = circumference of earth in miles / 360 degrees\n return dist", "def dist_sf( lon1, lon2, lat1, lat2 ):\n\n subfalla_i = (lon1, lat1)\n subfalla_j = (lon2, lat2)\n distancia = distance.distance( subfalla_i, subfalla_j ).meters\n\n return distancia" ]
[ "0.7638325", "0.70502967", "0.6934199", "0.6927222", "0.6903243", "0.6792316", "0.6787839", "0.6785977", "0.6757207", "0.6749283", "0.6746814", "0.6737798", "0.6729415", "0.6711453", "0.66890496", "0.66774637", "0.664188", "0.6629779", "0.6612115", "0.66114235", "0.66067094", "0.65922534", "0.65896803", "0.65835136", "0.65729874", "0.65598667", "0.6552568", "0.6549336", "0.65461516", "0.6543318" ]
0.7146757
1
Compute a similarity matrix for a set of points. The points are assumed to lie on the surface of the same sphere.
def similarity_matrix(points, sigma):
    distances_squared = spherical_distances(points, points)**2
    return np.exp( -distances_squared / (2.0 * sigma) )
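A minimal sketch, assuming NumPy and the spherical_distances helper from the previous record is in scope: identical points get similarity 1, and similarity decays with squared geodesic distance.

import numpy as np

points = np.array([[1.0, 0.0, 0.0],
                   [0.0, 1.0, 0.0],
                   [0.0, 0.0, 1.0]])
print(np.round(similarity_matrix(points, sigma=1.0), 3))   # ones on the diagonal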
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def self_similarity_matrix(feature_vectors):\n norm_feature_vectors, mean, std = at.normalize_features([feature_vectors.T])\n norm_feature_vectors = norm_feature_vectors[0].T\n sim_matrix = 1.0 - distance.squareform(\n distance.pdist(norm_feature_vectors.T, 'cosine'))\n return sim_matrix", "def build_matrix(self):\n \n for p1 in self._properties: \n p1 = p1.get_vectorized_data()\n \n for p2 in self._properties:\n p2 = p2.get_vectorized_data()\n v1, v2 = self.prepare_vectors(p1, p2)\n self._similarity_matrix.append(cosine_similarity([v1],[v2]))", "def cosine_similarity_matrix(references: np.ndarray, queries: np.ndarray) -> np.ndarray:\n size1 = references.shape[0]\n size2 = queries.shape[0]\n scores = np.zeros((size1, size2))\n for i in range(size1):\n for j in range(size2):\n scores[i, j] = cosine_similarity(references[i, :], queries[j, :])\n return scores", "def compute_similarity_transform(source_points, target_points):\n assert target_points.shape[0] == source_points.shape[0]\n assert target_points.shape[1] == 3 and source_points.shape[1] == 3\n source_points = source_points.T\n target_points = target_points.T\n mu1 = source_points.mean(axis=1, keepdims=True)\n mu2 = target_points.mean(axis=1, keepdims=True)\n X1 = source_points - mu1\n X2 = target_points - mu2\n var1 = np.sum(X1 ** 2)\n K = X1.dot(X2.T)\n U, _, Vh = np.linalg.svd(K)\n V = Vh.T\n Z = np.eye(U.shape[0])\n Z[-1, -1] *= np.sign(np.linalg.det(U.dot(V.T)))\n R = V.dot(Z.dot(U.T))\n scale = np.trace(R.dot(K)) / var1\n t = mu2 - scale * R.dot(mu1)\n source_points_hat = scale * R.dot(source_points) + t\n source_points_hat = source_points_hat.T\n return source_points_hat", "def test_cosine_similarity_matrix():\n vectors1 = np.array([[1, 1, 0, 0],\n [1, 0, 1, 1]])\n vectors2 = np.array([[0, 1, 1, 0],\n [0, 0, 1, 1]])\n\n scores = cosine_similarity_matrix.py_func(vectors1, vectors2)\n expected_scores = np.array([[0.5, 0.],\n [0.40824829, 0.81649658]])\n assert scores == pytest.approx(expected_scores, 1e-7), \"Expected different scores.\"", "def test_cosine_similarity_matrix_compiled():\n vectors1 = np.array([[1, 1, 0, 0],\n [1, 0, 1, 1]])\n vectors2 = np.array([[0, 1, 1, 0],\n [0, 0, 1, 1]])\n\n scores = cosine_similarity_matrix(vectors1, vectors2)\n expected_scores = np.array([[0.5, 0.],\n [0.40824829, 0.81649658]])\n assert scores == pytest.approx(expected_scores, 1e-7), \"Expected different scores.\"", "def project_to_sphere(points):\n # for uv, the sphere: r=1, azimuth(phi): 2*pi*u, elevation(theta): 2*pi*v\n # theta is elevation, phi is azimuth\n r, theta, phi = cs.cart2sp(x=points[:, 0], y=points[:, 1], z=points[:, 2])\n # logger.info(f\"number of zero points in r: {np.sum(r==0)}\")\n assert np.sum(r == 0) == 0, \"points contains zeros\"\n points_sphere = points / r.reshape(-1, 1)\n return points_sphere, r, theta, phi\n\n # r, theta, phi = cs.cart2sp(x=1, y=1, z=1)\n\n # # spherical to cartesian\n # x, y, z = cs.sp2cart(r=1, theta=np.pi/4, phi=np.pi/4)\n\n # # cartesian to cylindrical\n # r, phi, z = cs.cart2cyl(x=1, y=1, z=1)", "def get_sims(centroids):\n\n sims = []\n length = len(centroids)\n \n for i in xrange(0, length):\n for j in xrange(i + 1, length):\n sims.append(similarity(centroids[i], centroids[j]))\n \n return sims", "def matrix(self, references: List[Spectrum], queries: List[Spectrum],\n array_type: str = \"numpy\",\n is_symmetric: bool = False) -> np.ndarray:\n reference_vectors = self.calculate_vectors(references)\n if is_symmetric:\n assert np.all(references == queries), \\\n \"Expected references to be 
equal to queries for is_symmetric=True\"\n query_vectors = reference_vectors\n else:\n query_vectors = self.calculate_vectors(queries)\n\n ms2ds_similarity = cosine_similarity_matrix(reference_vectors, query_vectors)\n return ms2ds_similarity", "def get_sim_matrix(centroids):\n\n matrix = {}\n length = len(centroids)\n\n for i in xrange(0, length):\n matrix[i] = {}\n\n for j in xrange(i + 1, length):\n matrix[i][j] = similarity(centroids[i], centroids[j])\n\n return matrix", "def similarity_matrix(P, similarity_measure, normalize=True, inverse=True):\n N = len(P) \n S = np.zeros((N, N))\n for i in range(N): \n for j in range(i): \n S[i][j] = similarity_measure(P[i], P[j])\n\n S = square(S)\n if normalize: \n S = S / np.max(S)\n if inverse:\n S = 1 - S # Higher value = more similar\n\n return S", "def cosine_similarity(X):\n matrix = X.dot(X.transpose()).todense()\n mat_len = len(matrix)\n norms = [0] * mat_len\n for i in range(0, mat_len):\n norms[i] = 1.0 / np.sqrt(matrix.item((i, i)))\n norm_mat = np.matrix(norms)\n return np.multiply(norm_mat.transpose().dot(norm_mat), matrix)", "def fix_sphere_m (center_x, center_y, center_z, radius, centers, radii, len_points):\n \n g_x = []\n g_y = []\n g_z = []\n points = [hydrogen_coord_gen(center_x, center_y, center_z, radius) for i in range(0, len_points)] \n x = [points[i][0] for i in range(0, len(points))] \n y = [points[i][1] for i in range(0, len(points))]\n z = [points[i][2] for i in range(0, len(points))]\n\n for i in range(0, len(points)):\n check = 0\n j = 0\n while (j <= (len(centers) - 1) and (check == 0)): \n if (calculate_3D_distance_2_centers(x[i], y[i], z[i], centers[j][0], centers[j][1], centers[j][2]) < radii[j]):\n check += 1\n j += 1\n if (check == 0):\n g_x.append(x[i])\n g_y.append(y[i])\n g_z.append(z[i])\n\n return g_x, g_y, g_z", "def spherical_distances(x, y):\n # Compute the norms of all points, we do NOT check they actually all lie on\n # the same sphere (that's the caller's responsibility).\n \n xn = np.sqrt((x**2).sum(axis=1))\n yn = np.sqrt((y**2).sum(axis=1))\n ang_cos = np.dot(x, y.T)/(xn[:, None]*yn[None, :])\n # Protect against numerical noise giving us cosine values outside the -1,1\n # range, where arccos would return nans.\n ang_cos = np.clip(ang_cos, -1, 1)\n\n return xn[:, None]*np.arccos(ang_cos)", "def dice_similarity_matrix(references: np.ndarray, queries: np.ndarray) -> np.ndarray:\n size1 = references.shape[0]\n size2 = queries.shape[0]\n scores = np.zeros((size1, size2))\n for i in range(size1):\n for j in range(size2):\n scores[i, j] = dice_similarity(references[i, :], queries[j, :])\n return scores", "def fit_hypersphere(data, method=\"Hyper\"):\n num_points = len(data)\n# print >>stderr, \"DEBUG: num_points=\", num_points\n \n if num_points==0:\n return (0,None)\n if num_points==1:\n return (0,data[0])\n dimen = len(data[0]) # dimensionality of hypersphere\n# print >>stderr, \"DEBUG: dimen=\", dimen\n \n if num_points<dimen+1:\n raise ValueError(\\\n \"Error: fit_hypersphere needs at least {} points to fit {}-dimensional sphere, but only given {}\".format(dimen+1,dimen,num_points))\n \n # central dimen columns of matrix (data - centroid)\n central = np.matrix(data, dtype=float) # copy the data\n centroid = np.mean(central, axis=0)\n for row in central:\n row -= centroid\n# print >>stderr, \"DEBUG: central=\", repr(central)\n\n # squared magnitude for each centered point, as a column vector\n square_mag= [sum(a*a for a in row.flat) for row in central] \n square_mag = 
np.matrix(square_mag).transpose()\n# print >>stderr, \"DEBUG: square_mag=\", square_mag\n \n if method==\"Taubin\":\n # matrix of normalized squared magnitudes, data\n mean_square = square_mag.mean()\n data_Z = np.bmat( [[(square_mag-mean_square)/(2*sqrt(mean_square)), central]])\n # print >> stderr, \"DEBUG: data_Z=\",data_Z\n u,s,v = linalg.svd(data_Z, full_matrices=False)\n param_vect= v[-1,:]\n params = [ x for x in np.asarray(param_vect)[0]] # convert from (dimen+1) x 1 matrix to list\n params[0] /= 2*sqrt(mean_square)\n params.append(-mean_square*params[0])\n params=np.array(params)\n \n else:\n # matrix of squared magnitudes, data, 1s\n data_Z = np.bmat( [[square_mag, central, np.ones((num_points,1))]])\n # print >> stderr, \"DEBUG: data_Z=\",data_Z\n\n # SVD of data_Z\n # Note: numpy's linalg.svd returns data_Z = u * s * v\n # not u*s*v.H as the Release 1.4.1 documentation claims.\n # Newer documentation is correct.\n u,s,v = linalg.svd(data_Z, full_matrices=False)\n # print >>stderr, \"DEBUG: u=\",repr(u)\n # print >>stderr, \"DEBUG: s=\",repr(s)\n # print >>stderr, \"DEBUG: v=\",repr(v)\n # print >>stderr, \"DEBUG: v.I=\",repr(v.I)\n\n if s[-1]/s[0] < 1e-12:\n # singular case\n # param_vect as (dimen+2) x 1 matrix\n param_vect = v[-1,:]\n # Note: I get last ROW of v, while Chernov claims last COLUMN,\n # because of difference in definition of SVD for MATLAB and numpy\n\n # print >> stderr, \"DEBUG: singular, param_vect=\", repr(param_vect)\n # print >> stderr, \"DEBUG: data_Z*V=\", repr(data_Z*v)\n # print >> stderr, \"DEBUG: data_Z*VI=\", repr(data_Z*v.I)\n # print >> stderr, \"DEBUG: data_Z*A=\", repr(data_Z*v[:,-1])\n else: \n Y = v.H*np.diag(s)*v\n Y_inv = v.H*np.diag([1./x for x in s])*v\n # print >>stderr, \"DEBUG: Y=\",repr(Y)\n # print >>stderr, \"DEBUG: Y.I=\",repr(Y.I), \"\\nY_inv=\",repr(Y_inv)\n #Ninv is the inverse of the constraint matrix, after centroid has been removed\n Ninv = np.asmatrix(np.identity(dimen+2, dtype=float))\n if method==\"Hyper\":\n Ninv[0,0] = 0\n Ninv[0,-1]=0.5\n Ninv[-1,0]=0.5\n Ninv[-1,-1] = -2*square_mag.mean()\n elif method==\"Pratt\":\n Ninv[0,0] = 0\n Ninv[0,-1]=-0.5\n Ninv[-1,0]=-0.5\n Ninv[-1,-1]=0\n else: \n raise ValueError(\"Error: unknown method: {} should be 'Hyper', 'Pratt', or 'Taubin'\")\n # print >> stderr, \"DEBUG: Ninv=\", repr(Ninv)\n\n # get the eigenvector for the smallest positive eigenvalue\n matrix_for_eigen = Y*Ninv*Y\n # print >> stderr, \"DEBUG: {} matrix_for_eigen=\\n{}\".format(method, repr(matrix_for_eigen))\n eigen_vals,eigen_vects = linalg.eigh(matrix_for_eigen)\n # print >> stderr, \"DEBUG: eigen_vals=\", repr(eigen_vals)\n # print >> stderr, \"DEBUG: eigen_vects=\", repr(eigen_vects)\n\n positives = [x for x in eigen_vals if x>0]\n if len(positives)+1 != len(eigen_vals):\n # raise ValueError(\"Error: for method {} exactly one eigenvalue should be negative: {}\".format(method,eigen_vals))\n print>>stderr, \"Warning: for method {} exactly one eigenvalue should be negative: {}\".format(method,eigen_vals)\n smallest_positive = min(positives)\n # print >> stderr, \"DEBUG: smallest_positive=\", smallest_positive\n # chosen eigenvector as 1 x (dimen+2) matrix\n A_colvect =eigen_vects[:,list(eigen_vals).index(smallest_positive)]\n # print >> stderr, \"DEBUG: A_colvect=\", repr(A_colvect)\n # now have to multiply by Y inverse\n param_vect = (Y_inv*A_colvect).transpose()\n # print >> stderr, \"DEBUG: nonsingular, param_vect=\", repr(param_vect) \n params = np.asarray(param_vect)[0] # convert from (dimen+2) x 1 matrix to 
array of (dimen+2)\n\n \n# print >> stderr, \"DEBUG: params=\", repr(params)\n radius = 0.5* sqrt( sum(a*a for a in params[1:-1])- 4*params[0]*params[-1])/abs(params[0])\n center = -0.5*params[1:-1]/params[0]\n#y print >> stderr, \"DEBUG: center=\", repr(center), \"centroid=\", repr(centroid)\n center += np.asarray(centroid)[0]\n return (radius,center)", "def get_sphere_info(points):\n rib = np.sum(points, axis=0)\n rib3d = proj3d(rib)\n pts3d = np.asarray([proj3d(p) for p in points])\n face_size = np.linalg.norm(pts3d[0] - rib3d)\n\n M = np.ones((4, 4), dtype=np.float)\n M[:3, :3] = pts3d[:3]\n M[3, :3] = rib3d\n b = [-sum(x*x) for x in M[:, :3]]\n # if this is a plane\n if abs(np.linalg.det(M)) < 1e-4:\n center = rib3d\n return True, center, None, face_size\n else:\n T = np.linalg.solve(M, b)\n D, E, F, G = T\n center = -0.5 * T[:3]\n radius = 0.5 * np.sqrt(D*D + E*E + F*F - 4*G)\n return False, center, radius, face_size", "def sfm(points):\n # Construct the required W/Rh/Sh matrices.\n\t\n # Get ih/jh from Rh and use them to find Q.\n\n # Use Q, Rh, and Sh to get R and S.\n\n # Extract the F 2x3 rotation matrices from R and form an (F,2,3) array of\n # rotation matrices.\n\n # Build an orthonormal matrix that rotates the first R matrix into an\n # identity matrix.\n\n # Apply the computed rotation matrix to the rotation matrices and the\n # points in S.\n\n # Return the R matrices and an ** Nx3 ** matrix containing the\n # reconstructed 3D points (note that S is 3xN).\n return None", "def best_fit(cls, points: array_like) -> Sphere:\n points = Points(points)\n\n if points.dimension != 3:\n raise ValueError(\"The points must be 3D.\")\n\n if points.shape[0] < 4:\n raise ValueError(\"There must be at least 4 points.\")\n\n if points.affine_rank() != 3:\n raise ValueError(\"The points must not be in a plane.\")\n\n n = points.shape[0]\n A = np.hstack((2 * points, np.ones((n, 1))))\n b = (points**2).sum(axis=1)\n\n c, _, _, _ = np.linalg.lstsq(A, b, rcond=None)\n\n center = c[:3]\n radius = float(np.sqrt(np.dot(center, center) + c[3]))\n\n return cls(center, radius)", "def get_similarity(self, ):\r\n customer_cos_similarity = cosine_similarity(self.rating_matrix, self.rating_matrix)\r\n customer_cos_similarity = pd.DataFrame(customer_cos_similarity,\r\n index=self.customer_vendor_matrix.index,\r\n columns=self.customer_vendor_matrix.index)\r\n # customer_pearson_similarity = np.corrcoef(self.rating_matrix,\r\n # self.rating_matrix,)\r\n # customer_pearson_similarity = pd.DataFrame(customer_pearson_similarity,\r\n # index=self.customer_vendor_matrix.index,\r\n # columns=self.customer_vendor_matrix.index)\r\n return customer_cos_similarity,\r\n # return customer_pearson_similarity run too slowly\r", "def pairwise_cosine(mat):\n def cosine_similarity(a, b):\n return (a * b).sum() / (np.linalg.norm(a) * np.linalg.norm(b))\n\n n = len(mat)\n dist_mat = np.zeros((n, n))\n for i in range(n):\n for j in range(i + 1, n):\n dist = cosine_similarity(mat[i], mat[j])\n dist_mat[i, j] = dist\n dist_mat[j, i] = dist\n\n dist_mat[np.isnan(dist_mat)] = 0\n\n return dist_mat", "def get_distance_matrices(points, bounds=None, one_point_ok=False):\n cPoints = len(points)\n if cPoints < 2 and not one_point_ok:\n raise ValueError(\"Distance mtx for one point is the point's dimensions. Perhaps you meant to provide more than one point to this function. 
Maybe you need to unpack your list/tuple.\")\n # Ensure each point has the same dimension\n cDim = len(points[0]) # count of dimensions\n for p in points:\n assert len(p) == cDim\n aPoints = np.array(points)\n # Use an inner iteration function because it's more versatile and easier to code than appending to a list\n def _iter():\n for i in xrange(cDim):\n xs = aPoints[:, i]\n xdist = np.tile(xs, (cPoints, 1))\n xdist = xdist - xdist.T\n if bounds is not None:\n try:\n min_b, max_b = bounds[i]\n width = max_b - min_b\n except IndexError:\n raise Exception(\"There aren't enough boundaries for the number of dimensions in the points. Ensure that your bounds are of the same dimension as your points.\")\n # Can't use mod because the lower triangle is negative and it wraps around weird\n## xdist = xdist % (width / 2.0)\n xdist[xdist > width / 2.0] -= width\n assert not np.any(xdist > width/2.0)\n xdist[xdist < -width / 2.0] += width\n assert not np.any(xdist < -width/2.0)\n yield xdist\n linear_distances = list(_iter())\n radial_distance = np.zeros_like(linear_distances[0])\n for x in linear_distances:\n radial_distance += x**2\n radial_distance = np.sqrt(radial_distance)\n linear_distances.append(radial_distance) # too lazy to name a temp variable\n return linear_distances", "def distance_from_sphere(self, points, params, sqrt=False):\n center, radius = params\n center = center.reshape((1, 3))\n distance = (torch.norm(points - center, p=2, dim=1) - radius) ** 2\n if sqrt:\n distance = guard_sqrt(distance)\n\n if self.reduce:\n distance = torch.mean(distance)\n return distance", "def sphere_centers(r_x, r_y, r_z):\n a_ccs_p_trans_m = hom_translation_matrix(\n t_x=0.265, t_y=0, t_z=0.014)\n a_ccs_p_rot_m = hom_rotation(x_axis_rotation_matrix(r_x) @\n y_axis_rotation_matrix(r_y) @\n z_axis_rotation_matrix(r_z))\n a_p_sph_1_2 = hom_translation_matrix(\n t_x=0.015, t_y=0.029, t_z=-0.0965)\n a_p_sph_2_2 = hom_translation_matrix(\n t_x=0.015, t_y=-0.029, t_z=-0.0965)\n\n a_ccs_ = a_ccs_p_trans_m @ a_ccs_p_rot_m\n a_c1 = a_ccs_ @ a_p_sph_1_2\n a_c2 = a_ccs_ @ a_p_sph_2_2\n\n return get_translation(a_c1), get_translation(a_c2)", "def points_to_matrix(points):\n points_matrix = np.matrix(points, dtype=np.float64).transpose()\n omega = np.ones(len(points), dtype=np.float64)\n points_matrix = np.matrix(np.vstack((points_matrix, omega)))\n return points_matrix", "def test_similarity(self):\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))", "def f(points):\n distances = np.zeros((points.shape[0],1))\n for i in range(len(points)):\n #print points[i,:], points[i,:]**2\n distances[i] = np.sqrt(np.sum(points[i,:]**2))\n return distances * np.sin(distances)", "def similarity_matrix(feat_mat):\n sim_mat = cosine_similarity(feat_mat)\n np.fill_diagonal(sim_mat, 0)\n return sim_mat", "def distance_matrix(sunspots1, sunspots2):\n \n N1 = len(sunspots1)\n N2 = len(sunspots2)\n\n distance_matrix = np.zeros((N1, N2))\n\n for i in list(range(N1)):\n for j in list(range(N2)):\n\n distance_matrix[i, j] = euclidean_dist(sunspots1[i], sunspots2[j])\n\n return distance_matrix", "def pairwise_cosine_similarity(x, y):\n x = torch.div(x, torch.sqrt(torch.max(torch.sum(x ** 2), 1e-12)))\n y = torch.div(y, torch.sqrt(torch.max(torch.sum(y ** 2), 1e-12)))\n return torch.mm(x, torch.transpose(y, 1, 0))" ]
[ "0.66129136", "0.63335097", "0.62122184", "0.6183862", "0.60915583", "0.60669845", "0.59568816", "0.5932109", "0.5899266", "0.588513", "0.5874931", "0.5850991", "0.5815165", "0.5814901", "0.58031887", "0.5772447", "0.5766222", "0.5751658", "0.57376456", "0.5730853", "0.57160264", "0.57133245", "0.56603676", "0.5656061", "0.56377786", "0.56176496", "0.55887806", "0.5571803", "0.5566374", "0.55611163" ]
0.7539285
0
Decorator to help verify that a function was actually executed. Annotates a function with an attribute 'didrun', and only sets it to True if the function is actually called.
def checkrun(f): @functools.wraps(f) def wrapper(*args, **kwargs): wrapper.didrun = True return f(*args, **kwargs) wrapper.didrun = False return wrapper
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_called(self, func):\n self.called[func] = False\n def _check(*args, **kwargs):\n self.called[func] = True\n return func(*args, **kwargs)\n return _check", "def run_once(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n if not wrapper.has_run:\n result = func(*args, **kwargs)\n wrapper.has_run = True\n return result\n wrapper.has_run = False\n return wrapper", "def post_run_func_checked(driver: HammerDriver) -> None:\n if post_run_func is not None:\n post_run_func(driver)", "def assertion_passed(self, func):", "def check_before_executing(f):\n @functools.wraps(f)\n def wrapper(self, *args, **kwargs):\n if not self._checked:\n assert self.is_correct, (\n 'The MatchList is incorrectly constructed. '\n 'Run check_and_print_if_error() for details.')\n return f(self, *args, **kwargs)\n return wrapper", "def check_in_use(f):\n\n def wrapped(self, *args, **kwargs):\n if self.fired:\n raise InUse(_(\"Executor in use\"))\n return f(self, *args, **kwargs)\n return wrapped", "def test_func(self):\n def func():\n return 0\n self.assertEqual(type(decorators.timeit(func)), types.FunctionType)", "def does_it_run(func, args):\n \n if args is None:\n func()\n else:\n func(*args)", "def check_mocked_functions_called(*mocked_functions):\n for mocked_function in mocked_functions:\n assert_that(mocked_function.called, f\"The function was not called - {mocked_function}\")", "def _func_only(func):\n if inspect.isfunction(func):\n return\n else:\n raise Exception(\"Only functions can be tasks\")", "def test_process_invalid1(self):\n self.skill.logic = {}\n self.skill.valid.app_id = '12345'\n @self.skill.launch\n def sample_func():\n \"\"\"Decorated function.\"\"\"\n pass\n self.skill.logic['LaunchRequest']()\n self.assertFalse(self.skill.process(data.SAMPLE_LAUNCH_REQUEST))", "def run_once(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n if not wrapper.has_run:\n result = f(*args, **kwargs)\n wrapper.has_run = True\n wrapper.result = result\n \n return wrapper.result\n \n wrapper.has_run = False\n return wrapper", "def add_check_function(check_function: Callable):\n\n def decorator(func: Callable):\n @wraps(func)\n def wrapper(*args, **kwargs):\n check_function(*args, *kwargs.values())\n return func(*args, **kwargs)\n\n return wrapper\n\n name = getattr(check_function, '__name__', '`func`')\n decorator.__doc__ = f\"Check the function's arguments via `{name}` before calling it.\"\n return decorator", "def test_func_2(self):\n def func():\n return 0\n self.assertEqual(type(decorators.timeit_2(func)), types.FunctionType)", "def traced_function_wrong(function):\n logger.debug(\"started execution of %s\", function)\n start_time = time.time()\n\n @wraps(function)\n def wrapped(*args, **kwargs):\n result = function(*args, **kwargs)\n logger.info(\n \"function %s took %.2fs\", function, time.time() - start_time\n )\n return result\n\n return wrapped", "def final_check(self):\n for func in self.called.keys():\n self.assertTrue(self.called[func], \"%s was not called\" % (func,))", "def test_that_original_func_saved():\n\n assert callable(custom_sum.__original_func)", "def decorator(func):\n\n pass", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test 
at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILED.\".format(linenum))\n print(msg)", "def test(did_pass):\n linenum = sys._getframe(1).f_lineno # Get the caller's line number.\n if did_pass:\n msg = \"Test at line {0} ok.\".format(linenum)\n else:\n msg = (\"Test at line {0} FAILIED.\".format(linenum))\n print(msg)" ]
[ "0.65756035", "0.6015735", "0.60046905", "0.5997352", "0.59535944", "0.5899848", "0.5813919", "0.57161885", "0.5611487", "0.55894804", "0.5569097", "0.556421", "0.55513084", "0.55510217", "0.55382", "0.5538129", "0.55316585", "0.5526899", "0.55099505", "0.5504471", "0.5504471", "0.5504471", "0.5504471", "0.5504471", "0.5504471", "0.5504471", "0.5504363", "0.5502706", "0.5502706", "0.5497879" ]
0.8174126
0
Users can specify environment variables in their config file which will be set in the driver and worker environments. Make sure those variables are set during the workflow, but not after.
def test_workflow_environment(): config = { "workflow-name": "workflow", "cluster-type": CLUSTER_TYPE, "environment-variables": { "FOO": "BAR", "FOO2": "BAR2" } } template_dir = tempfile.mkdtemp(suffix="test-workflow-environment-template") with open(f"{template_dir}/workflow.yaml", 'w') as f: yaml.dump(config, f) @checkrun def execute(workflow_inst): def _check(): assert os.environ['FOO'] == "BAR" assert os.environ["OMP_NUM_THREADS"] == '1' return True # driver env _check() # worker env assert all(workflow_inst.run_on_each_worker(_check).values()) os.environ['FOO'] = 'ORIGINAL_FOO' _execution_dir, _workflow = launch_flow(template_dir, 1, _custom_execute_fn=execute) assert execute.didrun # Environment is restored after execution is finished. assert os.environ['FOO'] == 'ORIGINAL_FOO' assert 'FOO2' not in os.environ
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def env_config():\n # setup\n env = {'ELB_GCP_PROJECT': 'expected-gcp-project',\n 'ELB_GCP_REGION': 'expected-gcp-region',\n 'ELB_GCP_ZONE': 'expected-gcp-zone',\n 'ELB_BATCH_LEN': '93',\n 'ELB_CLUSTER_NAME': 'expected-cluster-name',\n 'ELB_RESULTS': 'gs://expected-results',\n 'ELB_USE_PREEMPTIBLE': 'true',\n 'ELB_BID_PERCENTAGE': '91'}\n\n for var_name in env:\n os.environ[var_name] = str(env[var_name])\n\n yield env\n\n # cleanup\n for var_name in env:\n # os.unsetenv does not work on every system\n del os.environ[var_name]", "def test_local_env_pass_explicit(fileutils) -> None:\n exp_value = str(uuid.uuid4())\n env_key = \"test_local_env_pass_explicit\"\n\n assert env_key not in os.environ\n\n test_dir = fileutils.make_test_dir()\n script = fileutils.get_test_conf_path(\"check_env.py\")\n\n exp_dir = f\"{test_dir}/exp\"\n os.makedirs(exp_dir)\n exp = Experiment(\"LRZ\", exp_path=exp_dir, launcher=\"slurm\")\n\n exe_name = \"python\"\n exe_args = [script, env_key]\n\n # Create the RunSettings associated with the workload manager (WLM) run command\n run_args = {\"--nodes\": 1, \"--ntasks\": 1, \"--time\": \"00:01:00\"}\n env_vars = {env_key: exp_value} # <-- explicitly passing a new env var to task\n settings = RunSettings(\n exe_name, exe_args, run_command=\"srun\", run_args=run_args, env_vars=env_vars\n )\n app_name = \"echo_app\"\n app = exp.create_model(app_name, settings)\n\n # generate the experiment structure and start the model\n exp.generate(app, overwrite=True)\n exp.start(app, block=True, summary=False)\n\n assert env_key in settings.env_vars\n\n with open(f\"{exp_dir}/{app_name}/{app_name}.out\") as app_outfile:\n app_output = app_outfile.read()\n \n # verify application was able to access the env var\n assert f\"{env_key}=={exp_value}\" in app_output", "def set_envs(self):\n # pylint:disable=protected-access\n # Need to call sys.__getframe() to get the filename and method/func\n # for logging information.\n\n # Useful for logging\n # Logging output: TIME UTC |TYPE (DEBUG, INFO, WARNING, etc.) 
|\n # [File : function]| Message\n cur_filename = sys._getframe().f_code.co_filename\n cur_function = sys._getframe().f_code.co_name\n\n self.logger.info('Setting env variables from config file...')\n # Set all the environment variables that are needed by the\n # MET config file.\n\n tmp_amodel = self.c_dict['AMODEL']\n if tmp_amodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_amodel_str = str(tmp_amodel).replace(\"\\'\", \"\\\"\")\n tmp_amodel = ''.join(tmp_amodel_str.split())\n self.add_env_var('AMODEL', tmp_amodel)\n else:\n self.add_env_var('AMODEL', \"[]\")\n\n tmp_bmodel = self.c_dict['BMODEL']\n if tmp_bmodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_bmodel_str = str(tmp_bmodel).replace(\"\\'\", \"\\\"\")\n tmp_bmodel = ''.join(tmp_bmodel_str.split())\n self.add_env_var('BMODEL', tmp_bmodel)\n else:\n self.add_env_var('BMODEL', \"[]\")\n\n tmp_desc = self.c_dict['DESC']\n if tmp_desc:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_desc_str = str(tmp_desc).replace(\"\\'\", \"\\\"\")\n tmp_desc = ''.join(tmp_desc_str.split())\n self.add_env_var('DESC', tmp_desc)\n else:\n self.add_env_var('DESC', \"[]\")\n\n tmp_storm_id = self.c_dict['STORM_ID']\n if tmp_storm_id:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_id_str = str(tmp_storm_id).replace(\"\\'\", \"\\\"\")\n tmp_storm_id = ''.join(tmp_storm_id_str.split())\n self.add_env_var('STORM_ID', tmp_storm_id)\n else:\n self.add_env_var('STORM_ID', \"[]\")\n\n tmp_basin = self.c_dict['BASIN']\n if tmp_basin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_basin_str = str(tmp_basin).replace(\"\\'\", \"\\\"\")\n tmp_basin = ''.join(tmp_basin_str.split())\n self.add_env_var('BASIN', tmp_basin)\n else:\n self.add_env_var('BASIN', \"[]\")\n\n tmp_cyclone = self.c_dict['CYCLONE']\n if tmp_cyclone:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_cyclone_str = str(tmp_cyclone).replace(\"\\'\", \"\\\"\")\n tmp_cyclone = ''.join(tmp_cyclone_str.strip())\n self.add_env_var('CYCLONE', tmp_cyclone)\n else:\n self.add_env_var('CYCLONE', \"[]\")\n\n tmp_storm_name = self.c_dict['STORM_NAME']\n if tmp_storm_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_name_str = str(tmp_storm_name).replace(\"\\'\", \"\\\"\")\n tmp_storm_name = ''.join(tmp_storm_name_str.strip())\n self.add_env_var('STORM_NAME', tmp_storm_name)\n else:\n self.add_env_var('STORM_NAME', \"[]\")\n\n if self.c_dict['INIT_BEG']:\n self.add_env_var('INIT_BEG', self.c_dict['INIT_BEG'])\n else:\n self.add_env_var('INIT_BEG', \"\")\n\n if self.c_dict['INIT_END']:\n self.add_env_var('INIT_END', self.c_dict['INIT_END'])\n else:\n self.add_env_var('INIT_END', \"\")\n\n tmp_init_include = self.c_dict['INIT_INCLUDE']\n if tmp_init_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_include_str = str(tmp_init_include).replace(\"\\'\", \"\\\"\")\n tmp_init_include = ''.join(tmp_init_include_str.strip())\n self.add_env_var('INIT_INCLUDE', tmp_init_include)\n else:\n self.add_env_var('INIT_INCLUDE', \"[]\")\n\n tmp_init_exclude = self.c_dict['INIT_EXCLUDE']\n if tmp_init_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_exclude_str = str(tmp_init_exclude).replace(\"\\'\", \"\\\"\")\n tmp_init_exclude = 
''.join(tmp_init_exclude_str.strip())\n self.add_env_var('INIT_EXCLUDE', tmp_init_exclude)\n else:\n self.add_env_var('INIT_EXCLUDE', \"[]\")\n\n tmp_init_hour = self.c_dict['INIT_HOUR']\n if tmp_init_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_hour_str = str(tmp_init_hour).replace(\"\\'\", \"\\\"\")\n tmp_init_hour = ''.join(tmp_init_hour_str.split())\n self.add_env_var('INIT_HOUR', tmp_init_hour)\n else:\n self.add_env_var('INIT_HOUR', \"[]\")\n\n tmp_valid_begin = self.c_dict['VALID_BEG']\n if tmp_valid_begin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_begin_str = str(tmp_valid_begin).replace(\"\\'\", \"\\\"\")\n tmp_valid_begin = ''.join(tmp_valid_begin_str.strip())\n self.add_env_var('VALID_BEG', tmp_valid_begin)\n else:\n self.add_env_var('VALID_BEG', '')\n\n tmp_valid_end = self.c_dict['VALID_END']\n if tmp_valid_end:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_end_str = str(tmp_valid_end).replace(\"\\'\", \"\\\"\")\n tmp_valid_end = ''.join(tmp_valid_end_str.strip())\n self.add_env_var('VALID_END', tmp_valid_end)\n else:\n self.add_env_var('VALID_END', \"\")\n\n tmp_valid_include = self.c_dict['VALID_INCLUDE']\n if tmp_valid_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_include_str = str(tmp_valid_include).replace(\"\\'\", \"\\\"\")\n tmp_valid_include = ''.join(tmp_valid_include_str.strip())\n self.add_env_var('VALID_INCLUDE', tmp_valid_include)\n else:\n self.add_env_var('VALID_INCLUDE', \"[]\")\n\n tmp_valid_exclude = self.c_dict['VALID_EXCLUDE']\n if tmp_valid_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_exclude_str = str(tmp_valid_exclude).replace(\"\\'\", \"\\\"\")\n tmp_valid_exclude = ''.join(tmp_valid_exclude_str.strip())\n self.add_env_var('VALID_EXCLUDE', tmp_valid_exclude)\n else:\n self.add_env_var('VALID_EXCLUDE', \"[]\")\n\n tmp_valid_hour = self.c_dict['VALID_HOUR']\n if tmp_valid_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_hour_str = str(tmp_valid_hour).replace(\"\\'\", \"\\\"\")\n tmp_valid_hour = ''.join(tmp_valid_hour_str.strip())\n self.add_env_var('VALID_HOUR', tmp_valid_hour)\n else:\n self.add_env_var('VALID_HOUR', \"[]\")\n\n tmp_lead_req = self.c_dict['LEAD_REQ']\n if tmp_lead_req:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_req_str = str(tmp_lead_req).replace(\"\\'\", \"\\\"\")\n tmp_lead_req = ''.join(tmp_lead_req_str.strip())\n self.add_env_var('LEAD_REQ', tmp_lead_req)\n else:\n self.add_env_var('LEAD_REQ', \"[]\")\n\n tmp_lead = self.c_dict['LEAD']\n if tmp_lead:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_str = str(tmp_lead).replace(\"\\'\", \"\\\"\")\n tmp_lead = ''.join(tmp_lead_str.strip())\n self.add_env_var('LEAD', tmp_lead)\n else:\n self.add_env_var('LEAD', \"[]\")\n\n tmp_init_mask = self.c_dict['INIT_MASK']\n if tmp_init_mask:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_mask_str = str(tmp_init_mask).replace(\"\\'\", \"\\\"\")\n tmp_init_mask = ''.join(tmp_init_mask_str.strip())\n self.add_env_var('INIT_MASK', tmp_init_mask)\n else:\n self.add_env_var('INIT_MASK', \"[]\")\n\n tmp_valid_mask = self.c_dict['VALID_MASK']\n if tmp_valid_mask:\n # Replace any single quotes with double quotes and remove any\n 
# whitespace\n tmp_valid_mask_str = str(tmp_valid_mask).replace(\"\\'\", \"\\\"\")\n tmp_valid_mask = ''.join(tmp_valid_mask_str.strip())\n self.add_env_var('VALID_MASK', tmp_valid_mask)\n else:\n self.add_env_var('VALID_MASK', \"[]\")\n\n tmp_track_watch_warn = self.c_dict['TRACK_WATCH_WARN']\n if tmp_track_watch_warn:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_track_watch_warn_str = str(tmp_track_watch_warn).replace(\"\\'\",\n \"\\\"\")\n tmp_track_watch_warn = ''.join(tmp_track_watch_warn_str.strip())\n self.add_env_var('TRACK_WATCH_WARN', tmp_track_watch_warn)\n else:\n self.add_env_var('TRACK_WATCH_WARN', \"[]\")\n\n tmp_column_thresh_name = self.c_dict['COLUMN_THRESH_NAME']\n if tmp_column_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_name_str = str(tmp_column_thresh_name).replace(\n \"\\'\", \"\\\"\")\n tmp_column_thresh_name = ''.join(tmp_column_thresh_name_str.strip())\n self.add_env_var('COLUMN_THRESH_NAME', tmp_column_thresh_name)\n else:\n self.add_env_var('COLUMN_THRESH_NAME', \"[]\")\n\n tmp_column_thresh_val = self.c_dict['COLUMN_THRESH_VAL']\n if tmp_column_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_val_str = str(tmp_column_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_column_thresh_val = ''.join(tmp_column_thresh_val_str.strip())\n self.add_env_var('COLUMN_THRESH_VAL', tmp_column_thresh_val)\n else:\n self.add_env_var('COLUMN_THRESH_VAL', \"[]\")\n\n tmp_column_str_name = self.c_dict['COLUMN_STR_NAME']\n if tmp_column_str_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_name = str(tmp_column_str_name).replace(\"\\'\",\n \"\\\"\")\n tmp_column_str_name = ''.join(tmp_column_str_name.strip())\n self.add_env_var('COLUMN_STR_NAME', tmp_column_str_name)\n else:\n self.add_env_var('COLUMN_STR_NAME', \"[]\")\n\n tmp_column_str_val = self.c_dict['COLUMN_STR_VAL']\n if tmp_column_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_val_str = str(tmp_column_str_val).replace(\"\\'\", \"\\\"\")\n tmp_column_str_val = ''.join(tmp_column_str_val_str.strip())\n self.add_env_var('COLUMN_STR_VAL', tmp_column_str_val)\n else:\n self.add_env_var('COLUMN_STR_VAL', \"[]\")\n\n tmp_init_thresh_name = self.c_dict['INIT_THRESH_NAME']\n if tmp_init_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_name_str = str(tmp_init_thresh_name).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_name = ''.join(tmp_init_thresh_name_str.strip())\n\n self.add_env_var('INIT_THRESH_NAME', tmp_init_thresh_name)\n\n else:\n self.add_env_var('INIT_THRESH_NAME', \"[]\")\n\n tmp_init_thresh_val = self.c_dict['INIT_THRESH_VAL']\n if tmp_init_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_val_str = str(tmp_init_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_val = ''.join(tmp_init_thresh_val_str.strip())\n self.add_env_var('INIT_THRESH_VAL', tmp_init_thresh_val)\n else:\n self.add_env_var('INIT_THRESH_VAL', \"[]\")\n\n tmp_init_str_name = self.c_dict['INIT_STR_NAME']\n if tmp_init_str_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_name_str = str(tmp_init_str_name).replace(\"\\'\", \"\\\"\")\n tmp_init_str_name = ''.join(tmp_init_str_name_str.strip())\n 
self.add_env_var('INIT_STR_NAME', tmp_init_str_name)\n else:\n self.add_env_var('INIT_STR_NAME', \"[]\")\n\n tmp_init_str_val = self.c_dict['INIT_STR_VAL']\n if tmp_init_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_val_str = str(tmp_init_str_val).replace(\"\\'\", \"\\\"\")\n tmp_init_str_val = ''.join(tmp_init_str_val_str.strip())\n self.add_env_var('INIT_STR_VAL', tmp_init_str_val)\n else:\n self.add_env_var('INIT_STR_VAL', \"[]\")\n\n # boolean values for WATER_ONLY\n if self.c_dict['WATER_ONLY']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('WATER_ONLY', flag)\n\n # boolean value for LANDFALL\n if self.c_dict['LANDFALL']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('LANDFALL', flag)\n\n if self.c_dict['LANDFALL_BEG']:\n self.add_env_var('LANDFALL_BEG',\n self.c_dict['LANDFALL_BEG'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_BEG', '-24')\n\n if self.c_dict['LANDFALL_END']:\n self.add_env_var('LANDFALL_END',\n self.c_dict['LANDFALL_END'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_END', '00')\n\n # boolean value for MATCH_POINTS\n if self.c_dict['MATCH_POINTS'] == 'true':\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('MATCH_POINTS', flag)\n\n if self.c_dict['CONFIG_FILE']:\n self.add_env_var('CONFIG_FILE',\n self.c_dict['CONFIG_FILE'])\n else:\n self.log_error(\n cur_filename + '|' + cur_function +\n ': no MET TC-Stat config file found. Exiting')\n sys.exit(1)\n\n jobs_list_tmp = self.c_dict['JOBS_LIST']\n if jobs_list_tmp:\n # MET is expecting a string\n jobs_list_str = '\"' + jobs_list_tmp + '\"'\n self.add_env_var('JOBS', jobs_list_str)\n else:\n self.log_error('No jobs list defined. Please check your METplus'\n 'config file. 
Exiting...')\n sys.exit(1)\n return 0", "def _setup_env(self):\n\n os.environ['GIT_NAME'] = statiki.GIT_NAME\n os.environ['GIT_EMAIL'] = statiki.GIT_EMAIL\n os.environ['GH_TOKEN'] = 'this-is-a-bogus-token:password'\n os.environ['TRAVIS_REPO_SLUG'] = TEST_REPO\n\n return", "def environment_vars_set():\n os.environ[\"YESSSSMS_LOGIN\"] = \"03211234567\"\n os.environ[\"YESSSSMS_PASSWD\"] = \"MySecr3t\"\n os.environ[\"YESSSSMS_PROVIDER\"] = \"goood\"\n os.environ[\"YESSSSMS_RECIPIENT\"] = \"066356789789\"", "def _setup_environment_and_configs(args, appengine_path):\n clusterfuzz_dir = os.path.abspath(os.path.join(args.directory, 'clusterfuzz'))\n\n # Matches startup scripts.\n os.environ['PYTHONPATH'] = ':'.join([\n os.getenv('PYTHONPATH', ''),\n appengine_path,\n os.path.join(clusterfuzz_dir, 'src'),\n ])\n\n os.environ['ROOT_DIR'] = clusterfuzz_dir\n if not os.getenv('BOT_NAME'):\n os.environ['BOT_NAME'] = args.name\n\n os.environ['LD_LIBRARY_PATH'] = '{0}:{1}'.format(\n os.path.join(clusterfuzz_dir, 'src', 'clusterfuzz', '_internal',\n 'scripts'), os.getenv('LD_LIBRARY_PATH', ''))\n\n tmpdir = os.path.join(clusterfuzz_dir, 'bot_tmpdir')\n if not os.path.exists(tmpdir):\n os.mkdir(tmpdir)\n os.environ['TMPDIR'] = tmpdir\n os.environ['BOT_TMPDIR'] = tmpdir\n\n os.environ['KILL_STALE_INSTANCES'] = 'False'\n os.environ['LOCAL_DEVELOPMENT'] = 'True'\n os.environ['DATASTORE_EMULATOR_HOST'] = constants.DATASTORE_EMULATOR_HOST\n os.environ['PUBSUB_EMULATOR_HOST'] = constants.PUBSUB_EMULATOR_HOST\n os.environ['APPLICATION_ID'] = constants.TEST_APP_ID\n\n if not os.getenv('UNTRUSTED_WORKER'):\n local_gcs_buckets_path = os.path.abspath(\n os.path.join(args.server_storage_path, 'local_gcs'))\n assert os.path.exists(local_gcs_buckets_path), (\n 'Server storage path not found, make sure to start run_server with '\n 'the same storage path.')\n\n os.environ['LOCAL_GCS_BUCKETS_PATH'] = local_gcs_buckets_path\n\n if args.android_serial:\n if not os.getenv('OS_OVERRIDE'):\n os.environ['OS_OVERRIDE'] = 'ANDROID'\n\n os.environ['ANDROID_SERIAL'] = args.android_serial", "def test_local_env_pass_implicit(fileutils) -> None:\n exp_value = str(uuid.uuid4())\n env_key = \"test_local_env_pass_implicit\"\n os.environ[env_key] = exp_value\n\n test_dir = fileutils.make_test_dir()\n exp_dir = f\"{test_dir}/exp\"\n os.makedirs(exp_dir)\n script = fileutils.get_test_conf_path(\"check_env.py\")\n\n exp = Experiment(\"LRZ\", exp_path=exp_dir, launcher=\"slurm\")\n\n exe_name = \"python\"\n exe_args = [script, env_key]\n\n # Create the RunSettings associated with the workload manager (WLM) run command\n run_args = {\"--nodes\": 1, \"--ntasks\": 1, \"--time\": \"00:01:00\"}\n # NOTE: not passing env_args into run_settings here, relying on --export=ALL default\n settings = RunSettings(exe_name, exe_args, run_command=\"srun\", run_args=run_args)\n app_name = \"echo_app\"\n app = exp.create_model(app_name, settings)\n\n # generate the experiment structure and start the model\n exp.generate(app, overwrite=True)\n exp.start(app, block=True, summary=False)\n\n assert env_key not in settings.env_vars\n os.environ.pop(env_key)\n\n with open(f\"{exp_dir}/{app_name}/{app_name}.out\") as app_outfile:\n app_output = app_outfile.read()\n \n # verify application was able to access the env var\n assert f\"{env_key}=={exp_value}\" in app_output", "def setup_environment():\n os.environ['QUEUE_OVERRIDE'] = 'LINUX_UNTRUSTED'\n os.environ['WORKER_ROOT_DIR'] = os.path.join(MNT_DIR, 'clusterfuzz')\n os.environ['WORKER_BOT_TMPDIR'] = 
os.path.join(MNT_DIR, 'tmp')\n\n if not os.path.exists(BOT_BASEDIR):\n os.mkdir(BOT_BASEDIR)", "def _setup_environment_vars(self, opts):\n # Check that these directories actually exist\n assert os.path.isdir(opts.movie_advisor_home)\n\n #if not 'install-bento' in self.actions: assert os.path.isdir(opts.bento_home)\n\n self.movie_advisor_home = opts.movie_advisor_home\n self.bento_home = opts.bento_home\n self.bento_tgz = opts.bento_tgz\n self.kiji_uri = \"kiji://.env/tutorial\"\n\n # \"express job\" takes a jar file as an argument\n assert os.path.isfile(os.path.join(self.movie_advisor_home, self.express_jar))\n\n # Set the classpath for all of the commands that we'll run\n jarsFullPaths = [os.path.join(self.movie_advisor_home, j) for j in self.jars]\n for jar in jarsFullPaths: assert os.path.isfile(jar)\n\n classpath = \":\".join(jarsFullPaths)\n os.environ['KIJI_CLASSPATH'] = classpath\n\n if opts.show_classpath:\n print(\"export KIJI_CLASSPATH=%s\" % classpath)\n sys.exit(0)", "def env_config_no_cluster():\n # setup\n env = {'ELB_GCP_PROJECT': 'expected-gcp-project',\n 'ELB_RESULTS': 'gs://expected-results'}\n\n for var_name in env:\n os.environ[var_name] = env[var_name]\n # Test that the results parameter is passed correctly and that trailing slash is discarded\n os.environ['ELB_RESULTS'] = TEST_RESULTS_BUCKET + '/'\n\n yield env\n\n # cleanup\n for var_name in env:\n # os.unsetenv does not work on every system\n del os.environ[var_name]", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def _init_env_variables(self):\n raise NotImplementedError()", "def setup_env(app_dir, app_id, version, module_id, remote_api=False):\n # GCS library behaves differently when running under remote_api. It uses\n # SERVER_SOFTWARE to figure this out. See cloudstorage/common.py, local_run().\n if remote_api:\n os.environ['SERVER_SOFTWARE'] = 'remote_api'\n else:\n os.environ['SERVER_SOFTWARE'] = 'Development yo dawg/1.0'\n if app_dir:\n app_id = app_id or Application(app_dir).app_id\n version = version or 'default-version'\n if app_id:\n os.environ['APPLICATION_ID'] = app_id\n if version:\n os.environ['CURRENT_VERSION_ID'] = '%s.%d' % (\n version, int(time.time()) << 28)\n if module_id:\n os.environ['CURRENT_MODULE_ID'] = module_id", "def set_env_var(self):\n\n list_env_vars = self.config.items('environment_variables')\n for env_var in list_env_vars:\n os.environ[env_var[0].upper()] = env_var[1]", "def _environment(self):\n\n self.spark_home = self._config_default(\"spark-home\",\n self._context(SparkSubmit.SPARK_HOME, default = os.environ.get(SparkSubmit.SPARK_HOME,None)))\n assert self.spark_home, \"unable to detect SPARK_HOME. 
set SPARK_HOME as directed in the task documentation\"\n assert os.path.exists(self.spark_home), \"provided SPARK_HOME doesn't exists\"\n\n spark_config = {'cluster-config': {}, 'other-config': {}}\n if 'config-file' in self._config_keys():\n spark_config.update(yaml.load(open(self._config('config-file')))['spark-config'])\n\n self.app_config = []\n\n spark_app = self._config('app-config')\n self.app_config.append(spark_app['application'])\n app_params = SparkSubmit._flat_node_to_cmd_line_args(spark_app['params']) if 'params' in spark_app else []\n self.app_config.extend(app_params)\n if 'resources' in spark_app:\n resources = [ ['--%s' % item] + (spark_app['resources'][item]) for item in spark_app['resources'].keys() ]\n self.resources = list(itertools.chain(*resources))\n else:\n self.resources = []\n\n\n cluster_config = self._config_default('cluster-config', {})\n cluster_config.update(spark_config['cluster-config'])\n self.cluster_options = list(itertools.chain(*[ ['--%s' % item, str(cluster_config[item]) ] for item in cluster_config.keys() ]))\n\n\n ##other options\n ## cluster options\n other_options = self._config_default('other-config',{})\n cluster_config.update(spark_config['other-config'])\n self.other_options = list(itertools.chain(*[ ['--%s' % item, str(other_options[item]) ] for item in other_options.keys() ]))", "def SetupEnvironment(self):\n pass", "def _setEnv(self):\n try:\n global_env_prfix = \"/GlobalEnv/\"\n if self.etcd_key_prefix is not None:\n global_env_prfix = self.etcd_key_prefix + \"/GlobalEnv/\"\n value = self.etcd.get(global_env_prfix)\n if value[0] is not None:\n jsonConfig = json.loads(value[0].decode('utf-8'))\n for key in jsonConfig.keys():\n os.environ[key] = jsonConfig[key]\n else:\n raise TypeError(\"config manager key {} must be set as \\\n a prerequisite ...\".format(global_env_prfix))\n except Exception as e:\n self.logger.error(\"Exception raised in _setEnv\\\n with error:{}\".format(e))\n raise e", "def test_env_var_settings_set(config, environment_vars_set_wowww):\n sms = YesssSMS.YesssSMS()\n assert sms._logindata[\"login_rufnummer\"] == \"03211234567\"\n assert sms._logindata[\"login_passwort\"] == \"MySecr3t\"\n assert sms._provider == \"wowww\"\n\n os.environ[\"YESSSSMS_PROVIDER\"] = \"goood\"\n sms = YesssSMS.YesssSMS(\"123456\", \"password\")\n assert sms._logindata[\"login_rufnummer\"] == \"03211234567\"\n assert sms._logindata[\"login_passwort\"] == \"MySecr3t\"\n assert sms._provider == \"goood\"\n\n del os.environ[\"YESSSSMS_PROVIDER\"]\n sms = YesssSMS.YesssSMS(\"123456\")\n assert sms._logindata[\"login_rufnummer\"] == \"03211234567\"\n assert sms._logindata[\"login_passwort\"] == \"MySecr3t\"\n assert sms._provider == \"yesss\"\n\n del os.environ[\"YESSSSMS_LOGIN\"]\n sms = YesssSMS.YesssSMS(\"123456\", \"password\")\n assert sms._logindata[\"login_rufnummer\"] == \"123456\"\n assert sms._logindata[\"login_passwort\"] == \"password\"\n assert sms._provider == \"yesss\"", "def get_env(self):\n self.airflow_cluster_name = conf.get('core', 'cluster')\n bicommon = BICommon()\n self.env_type = bicommon.env\n\n self.parameters.update({'airflow_cluster_name': self.airflow_cluster_name, 'env': self.env_type})", "def qa():\n env.config_file = 'config_production.py'\n env.hosts = ['[email protected]:34165']\n env.host_type = 'qa'\n env.user = 'ombu'\n env.host_webserver_user = 'www-data'\n env.host_site_path = '/mnt/main/qa/qa2/public'", "def set_env():\n env.local_dotenv_path = os.path.join(\n os.path.dirname(__file__), 
'etc/base_image/.env')\n dotenv.load_dotenv(env.local_dotenv_path)\n env.project_name = os.environ.get('PROJECT_NAME', '')\n env.project_dir = posixpath.join('/srv/images/', env.project_name)\n env.use_ssh_config = True\n\n # Bug: when setting this inside a function. Using host_string as workaround\n env.hosts = [os.environ.get('HOST_NAME', ''), ]\n env.host_string = os.environ.get('HOST_NAME', '')\n\n env.base_image_name = os.environ.get('BASE_IMAGE_NAME', '')\n env.build_dir = '/srv/build'\n env.local_path = os.path.dirname(__file__)", "def _setup_environment(environment):\n env.environment = environment\n env.project = ENVS[environment]\n env.hosts = [env.project['host']]\n env.user = env.project.get('user', env.local_user)\n env.password = env.project.get('password', None)\n # Redundant, just to easy the interpolation later on\n env.project['environment'] = environment", "def load_evironment():\n environment = Utility.load_yaml(os.getenv(\"system_file\", \"./system.yaml\"))\n for key in environment:\n if key in os.environ:\n environment[key] = os.getenv(key)\n Utility.environment = environment", "def get_environment_configuration():\n\n try:\n time_limit = int(os.getenv('AUTOBOT_POST_TIMELIMIT'))\n except TypeError:\n time_limit = None\n\n # if we're using Redis Labs\n redis_cloud_url = os.getenv('REDISCLOUD_URL')\n\n if redis_cloud_url:\n url = urlparse.urlparse(redis_cloud_url)\n redis_host = url.hostname\n redis_port = url.port\n redis_password = url.password\n else:\n redis_host = os.getenv('AUTOBOT_REDIS_URL')\n redis_port = os.getenv('AUTOBOT_REDIS_PORT')\n redis_password = None\n\n override = {\n REDDIT_USERNAME: os.getenv('AUTOBOT_REDDIT_USERNAME'),\n REDDIT_PASSWORD: os.getenv('AUTOBOT_REDDIT_PASSWORD'),\n SUBREDDIT: os.getenv('AUTOBOT_SUBREDDIT'),\n CLIENT_ID: os.getenv('AUTOBOT_CLIENT_ID'),\n CLIENT_SECRET: os.getenv('AUTOBOT_CLIENT_SECRET'),\n POST_TIMELIMIT: time_limit,\n REDIS_BACKEND: os.getenv('AUTOBOT_REDIS_BACKEND'),\n REDIS_URL: redis_host,\n REDIS_PORT: redis_port,\n REDIS_PASSWORD: redis_password,\n ROLLBAR_ACCESS_TOKEN: os.getenv('ROLLBAR_ACCESS_TOKEN'),\n ROLLBAR_ENVIRONMENT: os.getenv('ROLLBAR_ENVIRONMENT')\n }\n\n # remove all the 'None' valued things\n return {k: v for k, v in override.items() if v is not None}", "def _setup_env():\n env.home_path = os.path.expanduser('~')\n env.env_path = os.getenv('WORKON_HOME')\n\n if not env.env_path:\n warn(\"You should set the WORKON_HOME environment variable to\" \\\n \" the root directory for your virtual environments.\")\n env.env_path = env.sites_path\n\n env.project_path = join(env.sites_path, env.project_name)\n env.ve_path = join(env.env_path, env.project_name)\n env.activate_path = join(env.ve_path, 'bin', 'activate')", "def build_env(self, job, private_data_dir, private_data_files=None):\n env = super(RunJob, self).build_env(job, private_data_dir, private_data_files=private_data_files)\n if private_data_files is None:\n private_data_files = {}\n # Set environment variables needed for inventory and job event\n # callbacks to work.\n env['JOB_ID'] = str(job.pk)\n env['INVENTORY_ID'] = str(job.inventory.pk)\n if job.project:\n env['PROJECT_REVISION'] = job.project.scm_revision\n env['ANSIBLE_RETRY_FILES_ENABLED'] = \"False\"\n env['MAX_EVENT_RES'] = str(settings.MAX_EVENT_RES_DATA)\n if hasattr(settings, 'AWX_ANSIBLE_CALLBACK_PLUGINS') and settings.AWX_ANSIBLE_CALLBACK_PLUGINS:\n env['ANSIBLE_CALLBACK_PLUGINS'] = ':'.join(settings.AWX_ANSIBLE_CALLBACK_PLUGINS)\n env['AWX_HOST'] = settings.TOWER_URL_BASE\n\n # 
Create a directory for ControlPath sockets that is unique to each job\n cp_dir = os.path.join(private_data_dir, 'cp')\n if not os.path.exists(cp_dir):\n os.mkdir(cp_dir, 0o700)\n # FIXME: more elegant way to manage this path in container\n env['ANSIBLE_SSH_CONTROL_PATH_DIR'] = '/runner/cp'\n\n # Set environment variables for cloud credentials.\n cred_files = private_data_files.get('credentials', {})\n for cloud_cred in job.cloud_credentials:\n if cloud_cred and cloud_cred.credential_type.namespace == 'openstack' and cred_files.get(cloud_cred, ''):\n env['OS_CLIENT_CONFIG_FILE'] = to_container_path(cred_files.get(cloud_cred, ''), private_data_dir)\n\n for network_cred in job.network_credentials:\n env['ANSIBLE_NET_USERNAME'] = network_cred.get_input('username', default='')\n env['ANSIBLE_NET_PASSWORD'] = network_cred.get_input('password', default='')\n\n ssh_keyfile = cred_files.get(network_cred, '')\n if ssh_keyfile:\n env['ANSIBLE_NET_SSH_KEYFILE'] = ssh_keyfile\n\n authorize = network_cred.get_input('authorize', default=False)\n env['ANSIBLE_NET_AUTHORIZE'] = str(int(authorize))\n if authorize:\n env['ANSIBLE_NET_AUTH_PASS'] = network_cred.get_input('authorize_password', default='')\n\n path_vars = (\n ('ANSIBLE_COLLECTIONS_PATHS', 'collections_paths', 'requirements_collections', '~/.ansible/collections:/usr/share/ansible/collections'),\n ('ANSIBLE_ROLES_PATH', 'roles_path', 'requirements_roles', '~/.ansible/roles:/usr/share/ansible/roles:/etc/ansible/roles'),\n ('ANSIBLE_COLLECTIONS_PATH', 'collections_path', 'requirements_collections', '~/.ansible/collections:/usr/share/ansible/collections'),\n )\n\n config_values = read_ansible_config(os.path.join(private_data_dir, 'project'), list(map(lambda x: x[1], path_vars)))\n\n for env_key, config_setting, folder, default in path_vars:\n paths = default.split(':')\n if env_key in env:\n for path in env[env_key].split(':'):\n if path not in paths:\n paths = [env[env_key]] + paths\n elif config_setting in config_values:\n for path in config_values[config_setting].split(':'):\n if path not in paths:\n paths = [config_values[config_setting]] + paths\n paths = [os.path.join(CONTAINER_ROOT, folder)] + paths\n env[env_key] = os.pathsep.join(paths)\n\n return env" ]
[ "0.70271873", "0.7026732", "0.69392926", "0.69331855", "0.68138427", "0.6760369", "0.6758569", "0.6714527", "0.67041445", "0.66552407", "0.65845776", "0.65845776", "0.65845776", "0.65845776", "0.65845776", "0.65845776", "0.6584399", "0.65421516", "0.6530069", "0.6528909", "0.64960116", "0.64579433", "0.6447718", "0.6442308", "0.6439525", "0.6437336", "0.6370697", "0.6345632", "0.6339555", "0.6325007" ]
0.7265635
0
The config can specify a resource manager server address as "driver", which means the workflow should launch the resource manager on the scheduler machine. Make sure it launches, but is also shut down after the workflow exits.
def test_resource_manager_on_driver(): config = { "workflow-name": "workflow", "cluster-type": CLUSTER_TYPE, "resource-manager": { "server": "driver", "port": 4000, "config": { "read_reqs": 123, "read_data": 456, "write_reqs": 789, "write_data": 321 } } } template_dir = tempfile.mkdtemp(suffix="test-resource-manager-on-driver-template") with open(f"{template_dir}/workflow.yaml", 'w') as f: yaml.dump(config, f) @checkrun def execute(workflow_inst): client = ResourceManagerClient('127.0.0.1', 4000) mgr_config = client.read_config() assert mgr_config == config["resource-manager"]["config"], \ "Resource manager config does not match the one in the workflow config" _execution_dir, _workflow = launch_flow(template_dir, 1, _custom_execute_fn=execute) assert execute.didrun # FIXME: For mysterious reasons, the check below does not work on Travis-CI. # Somehow, read_config() succeeds despite the fact that # the resource manager server was already terminated?? if os.environ.get('TRAVIS', '') == 'true': pytest.skip("Skipping resource manager shutdown check on Travis-CI") # Server should not be running any more after workflow exits. with pytest.raises(TimeoutError): client2 = ResourceManagerClient('127.0.0.1', 4000) client2.read_config()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def launch(config):\n \n launch_with_configs([config])", "def run_xenon_simple(workflow, machine, worker_config):\n scheduler = Scheduler()\n\n return scheduler.run(\n xenon_interactive_worker(machine, worker_config),\n get_workflow(workflow)\n )", "def test_set_power_schedule_for_deployment_run(self):\n pass", "def test_relaunch_deployment_run(self):\n pass", "def launch_instance_manager():\n # Todo: Use name servers in the docker contexct (set up a docker compose?)\n # pyro4-ns\n parser = argparse.ArgumentParser(\"python3 launch_instance_manager.py\")\n parser.add_argument(\"--seeds\", type=str, default=None, \n help=\"The default seed for the environment.\")\n parser.add_argument(\"--seeding_type\", type=str, default=SeedType.CONSTANT, \n help=\"The seeding type for the environment. Defaults to 1 (CONSTANT)\"\n \"if a seed specified, otherwise 0 (NONE): \\n{}\".format(SeedType.__doc__))\n\n \n parser.add_argument(\"--max_instances\", type=int, default=None,\n help=\"The maximum number of instances the instance manager is able to spawn,\"\n \"before an exception is thrown. Defaults to Unlimited.\")\n opts = parser.parse_args()\n\n \n if opts.max_instances is not None:\n assert opts.max_instances > 0, \"Maximum instances must be more than zero!\"\n InstanceManager.MAXINSTANCES = opts.max_instances\n \n\n try:\n print(\"Removing the performance directory!\")\n try:\n shutil.rmtree(InstanceManager.STATUS_DIR)\n except:\n pass\n finally:\n if not os.path.exists(InstanceManager.STATUS_DIR):\n os.makedirs(InstanceManager.STATUS_DIR)\n print(\"autoproxy?\",Pyro4.config.AUTOPROXY)\n InstanceManager.REMOTE = True\n Pyro4.config.COMMTIMEOUT = InstanceManager.KEEP_ALIVE_PYRO_FREQUENCY \n\n # Initialize seeding.\n if opts.seeds is not None:\n InstanceManager._init_seeding(seeds=opts.seeds, seed_type=opts.seeding_type)\n else:\n InstanceManager._init_seeding(seed_type=SeedType.NONE)\n\n \n Pyro4.Daemon.serveSimple(\n {\n InstanceManager: INSTANCE_MANAGER_PYRO\n },\n ns = True)\n \n except Pyro4.errors.NamingError as e:\n print(e)\n print(\"Start the Pyro name server with pyro4-ns and re-run this script.\")", "def run(config):\n\tlog.debug('-- in example.py')\n#\tgetWLSMachineandandExecuteSecondary(config)\n#\t__createPegaConfigCommand(config)\n#\tcreateUsers(config)\n#\t__connectAdminServer(config)\n\tconnectAdminServerOverSSL(config)", "def test_launch_deployment(self):\n pass", "def init():\n\n @click.command()\n @click.option('--approot', type=click.Path(exists=True),\n envvar='TREADMILL_APPROOT', required=True)\n @click.option('--instance', help='Publisher instance.')\n def run(approot, instance):\n \"\"\"Starts discovery publisher process.\"\"\"\n tm_env = appenv.AppEnvironment(approot)\n publisher = endpoints.EndpointPublisher(tm_env.endpoints_dir,\n context.GLOBAL.zk.conn,\n instance=instance)\n publisher.run()\n\n return run", "def test_configurator(self):\n runner = Runner(YamlManifest(manifest))\n run1 = runner.run(JobOptions(resource=\"test1\"))\n assert not run1.unexpectedAbort, run1.unexpectedAbort.getStackTrace()\n assert len(run1.workDone) == 1, run1.workDone\n result = list(run1.workDone.values())[0].result\n self.assertEqual(result.outputs, {\"fact1\": \"test1\", \"fact2\": \"test\"})\n self.assertEqual(result.result.get(\"stdout\"), sys.executable)\n assert run1.status == Status.ok, run1.summary()", "def work(self):\n self.config_file = self.args.config\n self.init_config()\n self.init_db()\n\n self.kickoff()", "def _manageWorkersConfig(event):\n if event.info.get('key') != 
PluginSettings.SLICER_CLI_WEB_WORKER_CONFIG_ITEM:\n return\n if _loadWorkerConfig():\n _manageWorkers(None)", "def test_scheduler_runs():\n import mesos.native\n\n # Make sure fake_mysos_executor.pex is available to be fetched by Mesos slave.\n assert os.path.isfile('dist/fake_mysos_executor.pex')\n\n storage = FakeStorage(SequentialThreadingHandler())\n zk_client = FakeClient(storage=storage)\n zk_client.start()\n\n zk_url = \"zk://fake_host/home/mysos/clusters\"\n cluster_name = \"test_cluster\"\n num_nodes = 3\n\n state_provider = LocalStateProvider(safe_mkdtemp())\n\n framework_info = FrameworkInfo(\n user=getpass.getuser(),\n name=\"mysos\",\n checkpoint=False)\n\n state = Scheduler(framework_info)\n\n scheduler = MysosScheduler(\n state,\n state_provider,\n getpass.getuser(),\n os.path.abspath(\"dist/fake_mysos_executor.pex\"),\n \"./fake_mysos_executor.pex\",\n zk_client,\n zk_url,\n Amount(40, Time.SECONDS),\n \"/fakepath\",\n gen_encryption_key())\n\n RootMetrics().register_observable('scheduler', scheduler)\n\n scheduler_driver = mesos.native.MesosSchedulerDriver(\n scheduler,\n framework_info,\n \"local\")\n scheduler_driver.start()\n\n # Wait until the scheduler is connected and becomes available.\n assert scheduler.connected.wait(30)\n\n scheduler.create_cluster(cluster_name, \"mysql_user\", num_nodes, cluster_password=\"passwd\")\n\n # A slave is promoted to be the master.\n deadline(\n lambda: wait_for_master(\n get_cluster_path(posixpath.join(zk_url, 'discover'), cluster_name),\n zk_client),\n Amount(40, Time.SECONDS))\n\n scheduler.delete_cluster(cluster_name, password=\"passwd\")\n\n # The cluster is deleted from ZooKeeper.\n deadline(\n lambda: wait_for_termination(\n get_cluster_path(posixpath.join(zk_url, 'discover'), cluster_name),\n zk_client),\n Amount(40, Time.SECONDS))\n\n sample = RootMetrics().sample()\n assert sample['scheduler.tasks_killed'] == 1\n\n assert scheduler_driver.stop() == DRIVER_STOPPED", "def _configure_regular_job(config, job_exe, job_type, system_logging_level):\n config.create_tasks(['pull', 'pre', 'main', 'post'])\n config.add_to_task('pull', args=create_pull_command(job_exe.docker_image))\n config.add_to_task('pre', args=PRE_TASK_COMMAND_ARGS)\n config.add_to_task('post', args=POST_TASK_COMMAND_ARGS)\n\n # Configure input workspaces\n ro_input_workspaces = {}\n rw_input_workspaces = {}\n for input_workspace in config.get_input_workspace_names():\n ro_input_workspaces[input_workspace] = TaskWorkspace(input_workspace, MODE_RO)\n rw_input_workspaces[input_workspace] = TaskWorkspace(input_workspace, MODE_RW)\n config.add_to_task('pre', workspaces=ro_input_workspaces)\n config.add_to_task('main', workspaces=ro_input_workspaces)\n # Post tasks have access to input workspaces in case input files need moved as part of parse results\n config.add_to_task('post', workspaces=rw_input_workspaces)\n\n # Configure output workspaces\n output_workspaces = {}\n for output_workspace in config.get_output_workspace_names():\n output_workspaces[output_workspace] = TaskWorkspace(output_workspace, MODE_RW)\n config.add_to_task('post', workspaces=output_workspaces)\n\n # Configure input/output mounts\n input_mnt_name = 'scale_input_mount'\n output_mnt_name = 'scale_output_mount'\n input_vol_name = get_job_exe_input_vol_name(job_exe)\n output_vol_name = get_job_exe_output_vol_name(job_exe)\n input_vol_ro = Volume(input_vol_name, SCALE_JOB_EXE_INPUT_PATH, MODE_RO, is_host=False)\n input_vol_rw = Volume(input_vol_name, SCALE_JOB_EXE_INPUT_PATH, MODE_RW, is_host=False)\n 
output_vol_ro = Volume(output_vol_name, SCALE_JOB_EXE_OUTPUT_PATH, MODE_RO, is_host=False)\n output_vol_rw = Volume(output_vol_name, SCALE_JOB_EXE_OUTPUT_PATH, MODE_RW, is_host=False)\n\n config.add_to_task('pre', mount_volumes={input_mnt_name: input_vol_rw, output_mnt_name: output_vol_rw},\n env_vars={'SYSTEM_LOGGING_LEVEL': system_logging_level})\n config.add_to_task('main', mount_volumes={input_mnt_name: input_vol_ro, output_mnt_name: output_vol_rw})\n config.add_to_task('post', mount_volumes={output_mnt_name: output_vol_ro},\n env_vars={'SYSTEM_LOGGING_LEVEL': system_logging_level})\n\n\n # Configure output directory\n env_vars = {'OUTPUT_DIR': SCALE_JOB_EXE_OUTPUT_PATH, 'INPUT_METADATA': SCALE_INPUT_METADATA_PATH}\n args = config._get_task_dict('main')['args']\n\n args = environment_expansion(env_vars, args)\n\n config.add_to_task('main', args=args, env_vars=env_vars)\n\n # Configure task resources\n resources = job_exe.get_resources()\n # Pull-task and pre-task require full amount of resources\n config.add_to_task('pull', resources=resources)\n config.add_to_task('pre', resources=resources)\n # Main-task no longer requires the input file space\n resources.subtract(NodeResources([Disk(job_exe.input_file_size)]))\n config.add_to_task('main', resources=resources)\n # Post-task no longer requires any disk space\n resources.remove_resource('disk')\n config.add_to_task('post', resources=resources)", "def launch(config_list):\n p = PyRosLaunch(config_list)\n p.start()\n p.spin()", "def update_worker():\n from test import get_remote_runner\n runner = get_remote_runner()\n runner.run(\"python2.7 /vagrant/bootstrap_lxc_manager.py --update_only=True\")", "def test_get_server_runnable(self):\n global locator, config_paths\n locator.load_config(config_paths[2])\n\n self.assertIsNotNone(locator.get_server_runnable())", "def test_mgr_start_stop(self, runpath):\n driver = self.MyDriver(name=\"MyDriver\", runpath=runpath)\n\n assert not driver.pre_start_called\n assert not driver.post_start_called\n\n with driver:\n assert driver.pre_start_called\n assert driver.post_start_called\n assert not driver.pre_stop_called\n assert not driver.post_stop_called\n\n assert driver.pre_stop_called\n assert driver.post_stop_called", "def main(config):\n command = config.workflow_utils.command\n try:\n subprocess.run(command, shell=True, check=True)\n except AttributeError as exp:\n # add in some backward compatibility for py2.7\n subprocess.check_call(command, shell=True)", "def test_cron_workflow_service_create_cron_workflow(self):\n pass", "def test_workflows_restart(self):\n pass", "def setup_run(args, config): \n\n token = jwtfile.read() # read the JWT so we can send it in the header\n api = config['API']\n if not args.cwl: # beginning of process\n # request to get available options\n hdrs = {'begin-setup': 'True', 'token': token}\n r = Request(api['setup-run-start'], headers=hdrs)\n try:\n resp = urlopen(r)\n # if marked as unverified, we must login first to get a new token\n except HTTPError as e:\n # TODO deal with plain 400\n if e.code in [401, 406]:\n print('Your token is unverified. 
Please log in for another token.')\n login(args, config) # trigger login method\n return\n else:\n print('Was expecting a 401 or 406, got a {}'.format(e.code))\n return\n # print out options to command line\n jsn = json.loads(resp.read().decode()).get('opts', None)\n print('\\nPlease select a CWL and job (.yml) file and re-run this command'\\\n ' with the `--cwl <cwl>` option:\\n')\n print('Available Options\\n----------------')\n for k, v in jsn.items():\n print('{}: {}'.format(k, v))\n return\n cwl_file = args.cwl # get the .cwl\n # ask for a job title so the sevrer can store this\n title = None\n while not title: # can't skip\n title = input('Please enter a title for the job you are creating: ')\n hdrs = {'cwl-input': 'True', 'cwl': cwl_file, 'token': token}\n pld = {'cwl': cwl_file, 'job_title': title}\n r = Request(api['setup-run-select-wkflow'], data=urlencode(pld).encode(), headers=hdrs, method='POST')\n try:\n resp = urlopen(r)\n # we expect a response to ask us questions\n except HTTPError as e:\n if e.getcode() in [401, 406]:\n print('Uh oh, looks like your token has expired. Please re-login.')\n elif e.getcode() == 404: # notfound\n print('A template couldn\\'t be properly generated for that Workflow.')\n else:\n print('Expected 401, 404, 406, got {}'.format(e.getcode()))\n return\n # invoke the questions prompt; iterate through each CWL key\n job_input_dict = {} # initialize empty dict to be updated\n # send the inputs back as JSON\n print('You requested the following Workflow: \\n')\n jsn = json.loads(resp.read().decode()) # bytes to str to dict\n wkflow = jsn.get('workflow', None)\n print(wkflow)\n print('\\n')\n _req = jsn.get('required') # dict, but only because we're using requests lib...\n _opt = jsn.get('optional')\n job_input_dict.update(ask_wkflow(_req, _opt))\n job_inputs = json.dumps(job_input_dict)\n d = {\n 'cwl': cwl_file, \n 'job_inputs': job_inputs,\n 'job_title': title, \n }\n h = {'token': token}\n r = Request(api['setup-run-job-input'], data=urlencode(d).encode(), headers=h, method='POST')\n try:\n resp = urlopen(r)\n except HTTPError as e:\n if e.getcode() in [401, 406]:\n print('Token expired; please re-login')\n else:\n print('Huh?')\n return\n jsn = json.loads(resp.read().decode())\n if jsn.get('errors', {}) == {}: # empty dict means no errors!\n print('Your JOB sucessfully validated.')\n else: # print all errors and ask person to do it again\n #print(r.json.get('errors'))\n print(jsn.get('errors'))\n return", "def start(self):\n\n self.loadConf()\n self.loadDrivers()\n self.loadFeeds()\n self.runScheduler()\n self.scheduler.print_jobs()\n self.scheduler.start()\n self.printConf(\"test\")\n print(\"scheduler started\")", "def run_xenon(\n workflow, *, machine, worker_config, n_processes, deref=False,\n verbose=False):\n\n dynamic_pool = DynamicPool(machine)\n\n for i in range(n_processes):\n cfg = copy(worker_config)\n cfg.name = 'xenon-{0:02}'.format(i)\n dynamic_pool.add_xenon_worker(cfg)\n\n job_keeper = JobKeeper()\n S = Scheduler(job_keeper=job_keeper, verbose=verbose)\n\n result = S.run(\n dynamic_pool, get_workflow(workflow)\n )\n\n dynamic_pool.close_all()\n\n if deref:\n return worker_config.registry().dereference(result, host='scheduler')\n else:\n return result", "def runTestCase(self):\n \n #Login\n self.login() \n \n #Performing Configure Resources \n ResultCR, statusCR = self.test_configureResourec()\n \n if statusCR:\n self.succeed(\"Configure Resources Step Successfully Completed %s\"%ResultCR)\n \n else:\n self.failure(\"Failed to 
Configure Resources Step %s\"%ResultCR)\n \n time.sleep(120)", "def test_cron_workflow_service_terminate_cron_workflow(self):\n pass", "def init_workflow():\n pass", "def _do_bootstrap(self, configs=None):\n pass", "def test_config(self):\n\n # We start in uninitialized state.\n # In this state there is no driver process.\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.UNINITIALIZED)\n \n # Ping the agent.\n retval = self._ia_client.ping_agent()\n log.info(retval)\n\n # Initialize the agent.\n # The agent is spawned with a driver config, but you can pass one in\n # optinally with the initialize command. This validates the driver\n # config, launches a driver process and connects to it via messaging.\n # If successful, we switch to the inactive state.\n cmd = AgentCommand(command=ResourceAgentEvent.INITIALIZE)\n retval = self._ia_client.execute_agent(cmd)\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.INACTIVE)\n\n # Ping the driver proc.\n retval = self._ia_client.ping_resource()\n log.info(retval)\n\n decoder = IonObjectDeserializer(obj_registry=get_obj_registry())\n\n # Grab the alarms defined in the config.\n retval = decoder.deserialize(self._ia_client.get_agent(['alarms'])['alarms'])\n\n \"\"\"\n {'status': None, 'stream_name': 'parsed', 'name': 'test_sim_warning',\n 'upper_bound': 5.0, 'expr': 'x<5.0', 'upper_rel_op': '<',\n 'lower_rel_op': None, 'type_': 'IntervalAlarmDef', 'value_id': 'temp',\n 'lower_bound': None, 'message': 'Temperature is above test range of 5.0.',\n 'current_val': None, 'type': 1}\n \"\"\"\n self.assertEqual(retval[0].type_, 'IntervalAlarmDef')\n self.assertEqual(retval[0].upper_bound, 5.0)\n self.assertEqual(retval[0].expr, 'x<5.0')\n \n # Reset the agent. This causes the driver messaging to be stopped,\n # the driver process to end and switches us back to uninitialized.\n cmd = AgentCommand(command=ResourceAgentEvent.RESET)\n retval = self._ia_client.execute_agent(cmd)\n state = self._ia_client.get_agent_state()\n self.assertEqual(state, ResourceAgentState.UNINITIALIZED)", "def run_init_new_resource(self,\n name,\n xd_resource_id,\n ppn,\n sshUserName,\n remoteAccessNode,\n localScratch,\n networkScratch,\n akrrData,\n appKerDir,\n batchScheduler,\n authMeth=None,\n sshPassword = None,\n sshPrivateKeyFile = None,\n sshPrivateKeyPassword = None\n ):\n #start bash shell\n bash = self.getBash(setAKRRenv=True,cdToAKRR_HOME=True)\n bash.output=\"\"\n bash.timeoutMessage='Unexpected behavior of init_new_resource.sh (premature EOF or TIMEOUT)'\n \n fasttimeout=3\n slowtimeout=30\n #start prep script\n bash.startcmd(\"$AKRR_HOME/setup/scripts/init_new_resource.sh\")\n \n bash.expectSendline(r'\\[.*INPUT.*]: Enter resource_id for import \\(enter 0 for no match\\):.*\\n',\n '0' if xd_resource_id==None else str(xd_resource_id),timeout=fasttimeout)\n \n bash.expectSendline(r'\\[.*INPUT.*]: Enter AKRR resource name, hit enter to use same name as in XDMoD Database \\[.*\\]:.*\\n',\n '' if name==None else name,timeout=fasttimeout)\n \n bash.expectSendline(r'\\[.*INPUT.*]: Enter queuing system on resource \\(slurm or pbs\\):.*\\n',\n '' if batchScheduler==None else batchScheduler,timeout=fasttimeout)\n \n bash.expectSendline(r'\\[.*INPUT.*]: Enter Resource head node \\(access node\\) full name \\(e.g. 
headnode.somewhere.org\\):.*\\n',\n '' if remoteAccessNode==None else remoteAccessNode,timeout=fasttimeout)\n \n bash.expectSendline(r'\\[.*INPUT.*]: Enter username for resource access:.*\\n',\n '' if sshUserName==None else sshUserName,timeout=fasttimeout)\n \n iMatch=bash.justExpect([r'\\[.*INFO.*\\]: Can access resource without password',\n r'\\[.*INFO.*\\]: Can not access resource without password'],\n timeout=fasttimeout)\n if iMatch==0:\n if authMeth!=None:\n #i.e. the test is to go throurg list\n raise Exception(\"Passwordless access is already set-up, but expectation is to set new access method\")\n elif iMatch==1:\n #Select authentication method:\n # 0 The private and public keys was generated manually, right now. Try again.\n # 1 Use existing private and public key.\n # 2 Generate new private and public key.\n # 3 Use password directly.\n #[INPUT]: Select option from list above:\n bash.expectSendline(r'\\[.*INPUT.*]: Select option from list above:.*\\n\\[.*\\]',\n '' if authMeth==None else str(authMeth),timeout=fasttimeout)\n \n if authMeth==None or authMeth==2:\n bash.expectSendline(r'\\[.*INPUT.*]: Enter password for.*\\n',\n '' if sshPassword==None else str(sshPassword),timeout=fasttimeout)\n bash.expectSendline(r'\\[.*INPUT.*]: Enter private key name:.*\\n\\[.*\\]',\n '' if sshPrivateKeyFile==None else str(sshPrivateKeyFile),timeout=fasttimeout)\n bash.expectSendline(r'\\[.*INPUT.*]: Enter passphrase for new key \\(leave empty for passwordless access\\):.*\\n',\n '' if sshPrivateKeyPassword==None else str(sshPrivateKeyPassword),timeout=fasttimeout)\n elif authMeth==3:\n bash.expectSendline(r'\\[.*INPUT.*]: Enter password for.*\\n',\n '' if sshPassword==None else str(sshPassword),timeout=fasttimeout)\n elif authMeth==1:\n output=bash.justExpect(r'\\[.*INPUT.*]: Select key number from list above:.*\\n',timeout=fasttimeout)\n if sshPrivateKeyFile!=None:\n pkeys={}\n for l in output.splitlines():\n m=re.match(r'^\\s*(\\d+) \\s*(\\S+)',l)\n if m:\n pkeys[m.group(2)]=m.group(1)\n if sshPrivateKeyFile not in pkeys:\n raise Exception(\"Unknown private key: \"+sshPrivateKeyFile)\n bash.startcmd(str(pkeys[sshPrivateKeyFile]))\n else:\n bash.startcmd('0')\n \n bash.expectSendline(r'\\[.*INPUT.*]: Enter password for.*\\n',\n '' if sshPassword==None else str(sshPassword),timeout=fasttimeout)\n #sshPrivateKeyPassword\n bash.expectSendline(r'\\[.*INPUT.*]: Enter processors \\(cores\\) per node count:.*\\n',\n '' if ppn==None else str(ppn),timeout=slowtimeout)\n \n bash.expectSendline(r'\\[.*INPUT.*]: Enter location of local scratch \\(visible only to single node\\):.*\\n\\[.*\\]',\n '' if localScratch==None else str(localScratch),timeout=fasttimeout)\n\n bash.expectSendline(r'\\[.*INPUT.*]: Enter location of network scratch \\(visible only to all nodes\\), used for temporary storage of app kernel input/output:.*\\n',\n '' if networkScratch==None else str(networkScratch),timeout=fasttimeout)\n bash.justExpect(r'\\[.*INFO.*\\]: Directory exist and accessible for read/write')\n\n bash.expectSendline(r'\\[.*INPUT.*]: Enter future location of app kernels input and executable files:.*\\n\\[.*\\]',\n '' if appKerDir==None else str(appKerDir),timeout=fasttimeout)\n bash.justExpect(r'\\[.*INFO.*\\]: Directory exist and accessible for read/write')\n\n bash.expectSendline(r'\\[.*INPUT.*\\]: Enter future locations for app kernels working directories \\(can or even should be on scratch space\\):.*\\n\\[.*\\]',\n '' if akrrData==None else str(akrrData),timeout=fasttimeout)\n 
bash.justExpect(r'\\[.*INFO.*\\]: Directory exist and accessible for read/write')\n \n #wait for prompt\n output=bash.justExpect(bash.prompt,timeout=slowtimeout)\n \n delattr(bash, 'timeoutMessage')\n return copy.deepcopy(bash.output)", "def api(self, config):\n\n # Generate workflow file\n workflow = os.path.join(tempfile.gettempdir(), \"workflow.yml\")\n with open(workflow, \"w\", encoding=\"utf-8\") as f:\n f.write(config)\n\n os.environ[\"CONFIG\"] = workflow\n txtai.api.application.start()\n server = Server(txtai.api.application.app)\n with server.service():\n uid = 0\n while True:\n stop = st.empty()\n click = stop.button(\"stop\", key=uid)\n if not click:\n time.sleep(5)\n uid += 1\n stop.empty()" ]
[ "0.59481937", "0.5393874", "0.5334805", "0.53214973", "0.52877617", "0.5262214", "0.52576655", "0.52325445", "0.51960254", "0.5173219", "0.51681364", "0.5132155", "0.51199645", "0.5117904", "0.5104992", "0.5102506", "0.5085037", "0.5080357", "0.50710267", "0.5063733", "0.5049085", "0.5040776", "0.5025511", "0.50168973", "0.49915764", "0.49845466", "0.49802625", "0.49795437", "0.49559587", "0.49388114" ]
0.67965436
0
The config can specify a script to be run on each worker upon cluster initialization. This test verifies that it is launched and active while the workflow runs, and that it is launched on each worker, or just once per machine, depending on the config.
def test_worker_initialization(setup_worker_initialization_template): template_dir, _config, once_per_machine = setup_worker_initialization_template num_workers = 2 if once_per_machine or CLUSTER_TYPE in ("synchronous", "processes"): expected_script_count = 1 else: expected_script_count = num_workers @checkrun def execute(workflow_inst): script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent script_count = len(find_processes('_TEST_SCRIPT_FAKE_ARG_')) assert script_count > 0, f"Worker script is not running. Check logs in:\n{script_dir}" assert script_count <= expected_script_count, f"Worker script started too many times. Check logs in:\n{script_dir}" assert script_count == expected_script_count, f"Worker script not started on all workers. Check logs in:\n{script_dir}" _execution_dir, workflow_inst = launch_flow(template_dir, num_workers, _custom_execute_fn=execute) script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent script_count = len(find_processes('_TEST_SCRIPT_FAKE_ARG_')) assert script_count == 0, \ ("Worker script(s) remained running after the workflow exited."\ f"Check logs in:\n{script_dir}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_cluster_jobs_script(self):\r\n\r\n qiime_config = load_qiime_config()\r\n submit_script = qiime_config['cluster_jobs_fp']\r\n\r\n if (submit_script):\r\n full_path = which(submit_script)\r\n if full_path:\r\n submit_script = full_path\r\n self.assertTrue(exists(submit_script),\r\n \"cluster_jobs_fp is not set to a valid path in qiime config: %s\" % submit_script)\r\n # check if executable\r\n self.assertTrue(access(submit_script, X_OK),\r\n \"cluster_jobs_fp is not executable: %s\" % submit_script)\r\n else:\r\n # Can't run in parallel, but not a critical error\r\n pass", "def setup_worker_initialization_template(request):\n once_per_machine = request.param\n template_dir = tempfile.mkdtemp(suffix=\"test-worker-initialization\")\n\n worker_script = f\"{template_dir}/do-nothing.sh\"\n with open(worker_script, 'w') as f:\n f.write(\"#!/bin/bash\\n\")\n f.write(\"sleep 10\")\n os.chmod(worker_script, 0o777)\n \n config = {\n \"workflow-name\": \"workflow\",\n \"cluster-type\": CLUSTER_TYPE,\n \n \"worker-initialization\": {\n \"script-path\": \"do-nothing.sh\",\n \"only-once-per-machine\": once_per_machine,\n \"script-args\": [\"_TEST_SCRIPT_FAKE_ARG_\"], # This is just here to make it easy to identify the process\n \"launch-delay\": 0\n }\n }\n \n with open(f\"{template_dir}/workflow.yaml\", 'w') as f:\n yaml.dump(config, f)\n\n return template_dir, config, once_per_machine", "def test_configurator(self):\n runner = Runner(YamlManifest(manifest))\n run1 = runner.run(JobOptions(resource=\"test1\"))\n assert not run1.unexpectedAbort, run1.unexpectedAbort.getStackTrace()\n assert len(run1.workDone) == 1, run1.workDone\n result = list(run1.workDone.values())[0].result\n self.assertEqual(result.outputs, {\"fact1\": \"test1\", \"fact2\": \"test\"})\n self.assertEqual(result.result.get(\"stdout\"), sys.executable)\n assert run1.status == Status.ok, run1.summary()", "def test_worker_dvid_initialization():\n repo_dir = Path(flyemflows.__file__).parent.parent\n template_dir = tempfile.mkdtemp(suffix=\"test-worker-dvid\")\n \n # Copy worker script/config into the template\n shutil.copy(f'{repo_dir}/scripts/worker-dvid/dvid.toml',\n f'{template_dir}/dvid.toml')\n \n shutil.copy(f'{repo_dir}/scripts/worker-dvid/launch-worker-dvid.sh',\n f'{template_dir}/launch-worker-dvid.sh')\n \n config = {\n \"workflow-name\": \"workflow\",\n \"cluster-type\": CLUSTER_TYPE,\n \n \"worker-initialization\": {\n \"script-path\": \"launch-worker-dvid.sh\",\n \"only-once-per-machine\": True,\n \"script-args\": [\"_TEST_SCRIPT_FAKE_ARG_\"], # This is just here to make it easy to identify the process\n \"launch-delay\": 1.0\n }\n }\n \n with open(f\"{template_dir}/workflow.yaml\", 'w') as f:\n yaml.dump(config, f)\n\n def is_worker_dvid_running():\n return len(find_processes('_TEST_SCRIPT_FAKE_ARG_')) > 0\n \n @checkrun\n def execute(workflow_inst):\n script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent\n assert is_worker_dvid_running(), f\"Worker DVID is not running. 
Check logs in:\\n{script_dir}\"\n \n _execution_dir, workflow_inst = launch_flow(template_dir, 1, _custom_execute_fn=execute)\n script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent\n assert not is_worker_dvid_running(), \\\n (\"Worker DVID remained running after the workflow exited.\"\\\n f\"Check logs in:\\n{script_dir}\")", "def launch(self):\n self.register_env_creator()\n\n # All worker nodes will block at this step during training\n ray_cluster_config = self.ray_init_config()\n if not self.is_master_node:\n return\n\n # Start the driver on master node\n ray.init(**ray_cluster_config)\n experiment_config = self.get_experiment_config()\n experiment_config = self.customize_experiment_config(experiment_config)\n print(\"Running experiment with config %s\" % json.dumps(experiment_config, indent=2))\n run_experiments(experiment_config)\n\n all_wokers_host_names = self.get_all_host_names()[1:]\n # If distributed job, send TERMINATION_SIGNAL to all workers.\n if len(all_wokers_host_names) > 0:\n self.sage_cluster_communicator.create_s3_signal(TERMINATION_SIGNAL)", "def launch(config):\n \n launch_with_configs([config])", "def run_experiment(experiment: str):\n print_color(\"***************************************************************************************************\", bcolors.OKBLUE)\n print_color(f\"* {datetime.now().strftime('%Y-%m-%d %H:%M:%S')} Experiment: {experiment}\", bcolors.OKBLUE)\n print_color(\"***************************************************************************************************\", bcolors.OKBLUE)\n\n experiment_file = experiment + \".yaml\"\n\n # Set namespace to check\n with open(f\"./litmus/{experiment_file}\") as f:\n spec = yaml.load(f, Loader=yaml.FullLoader)\n result_name = spec['metadata']['name']\n namespace = spec['metadata']['namespace']\n\n print_color(f\"Running Litmus ChaosEngine Experiment {experiment_file} in namespace {namespace}\")\n print_color(f\"Deploying {experiment_file}...\")\n run_shell(f\"kubectl delete chaosengine {result_name} -n {namespace}\")\n run_shell(f\"kubectl create -f ./litmus/{experiment_file} -n {namespace}\")\n\n # Check status of experiment execution\n startTime = datetime.now()\n print_color(f\"{startTime.strftime('%Y-%m-%d %H:%M:%S')} Running experiment...\")\n expStatusCmd = \"kubectl get chaosengine \" + result_name + \" -o jsonpath='{.status.experiments[0].status}' -n \" + namespace\n run_shell(expStatusCmd)\n logs_cmd = f\"kubectl logs --since=10s -l name={experiment} -n {namespace}\"\n print(f\"\\n{bcolors.OKGREEN}//** Experiment Logs ({logs_cmd}) **//\\n\\n\")\n try:\n while subprocess.check_output(expStatusCmd, shell=True).decode('unicode-escape') != \"Completed\":\n os.system(logs_cmd)\n os.system(\"sleep 10\")\n\n print(f\"\\n\\n//** End of Experiment Logs **//{bcolors.ENDC}\\n\")\n\n # View experiment results\n run_shell(f\"kubectl describe chaosresult {result_name}-{experiment} -n {namespace}\")\n\n except:\n print_color(\"User has cancelled script execution.\", bcolors.FAIL)\n sys.exit(2)\n\n # Store Experiment Result\n status = subprocess.check_output(\"kubectl get chaosresult \" + result_name + \"-\" + experiment + \" -n \" + namespace + \" -o jsonpath='{.status.experimentstatus.verdict}'\", shell=True).decode('unicode-escape')\n return ExperimentResult(experiment, status, startTime)", "def verify_runconfig(master_host, namespace, job_name, replica, num_ps,\n num_workers, num_evaluators):\n is_chief = True\n num_replicas = 1\n if replica == \"ps\":\n is_chief = False\n 
num_replicas = num_ps\n elif replica == \"worker\":\n is_chief = False\n num_replicas = num_workers\n elif replica == \"evaluator\":\n is_chief = False\n num_replicas = num_evaluators\n\n # Construct the expected cluster spec\n chief_list = [\n \"{name}-chief-0.{ns}.svc:2222\".format(name=job_name, ns=namespace)\n ]\n ps_list = []\n for i in range(num_ps):\n ps_list.append(\"{name}-ps-{index}.{ns}.svc:2222\".format(\n name=job_name, index=i, ns=namespace))\n worker_list = []\n for i in range(num_workers):\n worker_list.append(\"{name}-worker-{index}.{ns}.svc:2222\".format(\n name=job_name, index=i, ns=namespace))\n evaluator_list = []\n for i in range(num_evaluators):\n evaluator_list.append(\"{name}-evaluator-{index}.{ns}.svc:2222\".format(\n name=job_name, index=i, ns=namespace))\n cluster_spec = {\n \"chief\": chief_list,\n \"ps\": ps_list,\n \"worker\": worker_list,\n }\n if num_evaluators > 0:\n cluster_spec[\"evaluator\"] = evaluator_list\n\n for i in range(num_replicas):\n full_target = \"{name}-{replica}-{index}\".format(\n name=job_name, replica=replica.lower(), index=i)\n actual_config = get_runconfig(master_host, namespace, full_target)\n full_svc = \"{ft}.{ns}.svc\".format(ft=full_target, ns=namespace)\n expected_config = {\n \"task_type\": replica,\n \"task_id\": i,\n \"cluster_spec\": cluster_spec,\n \"is_chief\": is_chief,\n \"master\": \"grpc://{fs}:2222\".format(fs=full_svc),\n \"num_worker_replicas\": num_workers + 1, # Chief is also a worker\n \"num_ps_replicas\": num_ps,\n } if not replica == \"evaluator\" else {\n # Evaluator has special config.\n \"task_type\": replica,\n \"task_id\": 0,\n \"cluster_spec\": {},\n \"is_chief\": is_chief,\n \"master\": \"\",\n \"num_worker_replicas\": 0,\n \"num_ps_replicas\": 0,\n }\n\n # Compare expected and actual configs\n if actual_config != expected_config:\n msg = \"Actual runconfig differs from expected. Expected: {0} Actual: {1}\".format(\n str(expected_config), str(actual_config))\n logging.error(msg)\n raise RuntimeError(msg)", "def main():\n parser = argparse.ArgumentParser(description=main.__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument('--config', required=True, help='Configuration file for run. Must be in shared_dir')\n parser.add_argument('-c', '--cluster_size', required=True, help='Number of workers desired in the cluster.')\n parser.add_argument('-s', '--sample_size', required=True, type=float, help='Size of the sample deisred in TB.')\n parser.add_argument('-t', '--instance_type', default='c3.8xlarge', help='e.g. 
m4.large or c3.8xlarge.')\n parser.add_argument('-n', '--cluster_name', required=True, help='Name of cluster.')\n parser.add_argument('--namespace', default='jtvivian', help='CGCloud NameSpace')\n parser.add_argument('--spot_price', default=0.60, help='Change spot price of instances')\n parser.add_argument('-b', '--bucket', default='tcga-data-cgl-recompute', help='Bucket where data is.')\n parser.add_argument('-d', '--shared_dir', required=True,\n help='Full path to directory with: pipeline script, launch script, config, and master key.')\n params = parser.parse_args()\n\n # Run sequence\n start = time.time()\n # Get number of samples from config\n with open(params.config, 'r') as f:\n num_samples = len(f.readlines())\n # Launch cluster and pipeline\n uuid = fix_launch(params)\n launch_cluster(params)\n ids = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')\n launch_pipeline(params)\n # Blocks until all workers are idle\n stop = time.time()\n # Collect metrics from cluster\n collect_metrics(ids, list_of_metrics, start, stop, uuid=uuid)\n # Apply \"Insta-kill\" alarm to every worker\n map(apply_alarm_to_instance, ids)\n # Kill leader\n logging.info('Killing Leader')\n leader_id = get_instance_ids(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-leader')[0]\n apply_alarm_to_instance(leader_id, threshold=5)\n # Generate Run Report\n avail_zone = get_avail_zone(filter_cluster=params.cluster_name, filter_name=params.namespace + '_toil-worker')[0]\n total_cost, avg_hourly_cost = calculate_cost(params.instance_type, ids[0], avail_zone)\n # Report values\n output = ['UUID: {}'.format(uuid),\n 'Number of Samples: {}'.format(num_samples),\n 'Number of Nodes: {}'.format(params.cluster_size),\n 'Cluster Name: {}'.format(params.cluster_name),\n 'Source Bucket: {}'.format(params.bucket),\n 'Average Hourly Cost: ${}'.format(avg_hourly_cost),\n 'Cost per Instance: ${}'.format(total_cost),\n 'Availability Zone: {}'.format(avail_zone),\n 'Start Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(start))),\n 'Stop Time: {}'.format(datetime.isoformat(datetime.utcfromtimestamp(stop))),\n 'Total Cost of Cluster: ${}'.format(float(total_cost) * int(params.cluster_size)),\n 'Cost Per Sample: ${}'.format((float(total_cost) * int(params.cluster_size) / int(num_samples)))]\n with open(os.path.join(str(uuid) + '_{}'.format(str(datetime.utcnow()).split()[0]), 'run_report.txt'), 'w') as f:\n f.write('\\n'.join(output))\n # You're done!\n logging.info('\\n\\nScaling Test Complete.')", "def run_experiment():\n pass", "def test_by_config(self):\n # addon_executor = AddonExecutor(execute_order, stop_order)\n # self.assertEqual(expected, addon_executor.execute_with_config(addon))\n\n self.run_mgr.by_default(self.cli_inst)\n output = self._get_lines_as_list(sys.stdout)\n\n self.assertTrue(output[0].startswith('Start'))\n self.assertTrue(output[1].startswith('Execute'))\n self.assertTrue(output[2].startswith('Stop'))", "def run(config):\n\tlog.debug('-- in example.py')\n#\tgetWLSMachineandandExecuteSecondary(config)\n#\t__createPegaConfigCommand(config)\n#\tcreateUsers(config)\n#\t__connectAdminServer(config)\n\tconnectAdminServerOverSSL(config)", "def run(config):\n locator = cea.inputlocator.InputLocator(config.scenario)\n print('Key in run')\n print(config.bigmacc.key)\n i = config.bigmacc.key\n print(i)\n # SCENARIO SETUP ---\n config.general.project = os.path.join(config.bigmacc.data, config.general.parent, i)\n 
print(config.general.project)\n cea.datamanagement.data_initializer.main(config)\n # use the scenario code to set the year for the lca and other operations that need the current year\n pathway_code = config.general.parent\n pathway_items = pathway_code.split('_')\n scenario_year = int(pathway_items[1])\n config.emissions.year_to_calculate = scenario_year\n\n bigmacc_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, 'bigmacc_out', config.bigmacc.round)\n\n scen_check = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'), index_col='Unnamed: 0')\n experiment_key = 'exp_{}'.format(i)\n print(experiment_key)\n keys = [int(x) for x in str(i)]\n if experiment_key in scen_check['Experiments'].values.tolist():\n print('Experiment was finished previously, moving to next.')\n pass\n else:\n print('START: experiment {}.'.format(i))\n\n # INITIALIZE TIMER ---\n t0 = time.perf_counter()\n if os.path.exists(os.path.join(config.bigmacc.data, config.general.parent, i)):\n print(' - Folder exists for experiment {}.'.format(i))\n else:\n os.mkdir(os.path.join(config.bigmacc.data, config.general.parent, i))\n print(' - Folder does not exist for experiment {}, creating now.'.format(i))\n\n # run the archetype mapper to leverage the newly loaded typology file and set parameters\n print(' - Running archetype mapper for experiment {} to remove changes made in the last experiment.'.format(i))\n cea.datamanagement.archetypes_mapper.main(config)\n\n # run the rule checker to set the scenario parameters\n print(' - Running rule checker for experiment {}.'.format(i))\n cea.bigmacc.bigmacc_rules.main(config)\n\n # SIMULATIONS ---\n\n print(' - Run radiation is {}.'.format(config.bigmacc.runrad))\n print(' - Write sensor data is {}.'.format(config.radiation.write_sensor_data))\n # checking on need for radiation simulation\n\n if config.bigmacc.runrad == True:\n # this nested statement is for when we rerun the simulations and no longer need to run the unique radiation\n if config.bigmacc.rerun != True:\n print(' - Running radiation simulation for experiment {}.'.format(i))\n if os.path.exists(locator.get_radiation_building('B000')):\n print(' - Radiation folder exists for experiment {}, copying.'.format(i))\n else:\n print(' - Radiation running for experiment {}.'.format(i))\n cea.resources.radiation_daysim.radiation_main.main(config)\n else:\n # print(' - Copying radiation simulation data from previous run for experiment {}.'.format(i))\n old_rad_files = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'outputs', 'data', 'solar-radiation')\n # distutils.dir_util.copy_tree(old_rad_files, locator.get_solar_radiation_folder())\n else:\n radfiles = config.bigmacc.copyrad\n # print(' - Copying radiation results from {}.'.format(radfiles))\n # distutils.dir_util.copy_tree(radfiles, locator.get_solar_radiation_folder())\n print(' - Experiment {} does not require new radiation simulation.'.format(i))\n\n # running demand forecasting\n if os.path.exists(locator.get_schedule_model_file('B000')):\n print(' - Schedules exist for experiment {}.'.format(i))\n else:\n print(' - Schedule maker running for experiment {}.'.format(i))\n schedule_maker.main(config)\n\n # check to see if we need to rerun demand or if we can copy\n if config.bigmacc.rerun != True:\n print(' - Running demand simulation for experiment {}.'.format(i))\n cea.demand.demand_main.main(config)\n else:\n if keys[0] == 1:\n print(' - Running demand simulation for experiment {}.'.format(i))\n 
cea.demand.demand_main.main(config)\n elif keys[6] == 1:\n print(' - Running demand simulation for experiment {}.'.format(i))\n cea.demand.demand_main.main(config)\n else:\n cea.demand.demand_main.main(config)\n # print(' - Looking for demand results data from previous run for experiment {}.'.format(i))\n # old_demand_files = os.path.join(config.bigmacc.data, config.general.parent, i,\n # config.general.scenario_name, 'outputs', 'data', 'demand')\n # if os.path.exists(old_demand_files):\n # # print(' - Copy demand results files from previous run of experiment {}.'.format(i))\n # # distutils.dir_util.copy_tree(old_demand_files, locator.get_demand_results_folder())\n # pass\n # else:\n # print(' - No results found.')\n # print(' - Running demand simulation for experiment {}.'.format(i))\n # cea.demand.demand_main.main(config)\n\n if config.bigmacc.pv == True:\n print(' - Run PV is {}.'.format(config.bigmacc.pv))\n if config.bigmacc.rerun == True:\n print(' - Looking for radiation simulation data from previous run for experiment {}.'.format(i))\n old_pv_files = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'outputs', 'data', 'potentials', 'solar')\n if os.path.exists(old_pv_files):\n # print(' - Copying PV files from previous run of experiment {}.'.format(i))\n # distutils.dir_util.copy_tree(old_pv_files, locator.solar_potential_folder())\n pass\n else:\n print(' - PV files do not exist for previous run of experiment {} at {}.'.format(i, old_pv_files))\n print(' - Running PV simulation for experiment {}.'.format(i))\n photovoltaic.main(config)\n else:\n # if PV simulation is needed, run it.\n print(' - Running PV simulation for experiment {}.'.format(i))\n photovoltaic.main(config)\n\n print('Run water-body exchange is {}.'.format(config.bigmacc.water))\n # if water-body simulation is needed, run it.\n if config.bigmacc.water == True:\n print(' - Running water body simulation for experiment {}.'.format(i))\n water.main(config)\n\n # recalculating the supply split between grid and ng in the websrook DH\n if keys[4] == 1:\n print(' - Do not run district heat recalculation.')\n else:\n print(' - Run district heat recalculation.')\n cea.bigmacc.wesbrook_DH.main(config)\n\n if keys[7] == 1:\n print(' - PV use detected. 
Adding PV generation to demand files.')\n util.write_pv_to_demand(config)\n else:\n print(' - No PV use detected.')\n\n # running the emissions and costing calculations\n print(' - Run cost and emissions scripts.')\n cea.analysis.costs.system_costs.main(config)\n cea.analysis.lca.main.main(config)\n\n # clone out the simulation inputs and outputs directory\n print(' - Transferring results directory for experiment {}.'.format(i))\n\n new_inputs_path = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'inputs')\n new_outputs_path = os.path.join(config.bigmacc.data, config.general.parent, i,\n config.general.scenario_name, 'outputs', 'data')\n\n if config.bigmacc.rerun != True:\n distutils.dir_util.copy_tree(locator.get_data_results_folder(), new_outputs_path)\n distutils.dir_util.copy_tree(locator.get_input_folder(), new_inputs_path)\n\n time_elapsed = time.perf_counter() - t0\n\n # save log information\n log_df = pd.read_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'),\n index_col='Unnamed: 0')\n log_df = log_df.append(pd.DataFrame({'Experiments': 'exp_{}'.format(i),\n 'Completed': 'True',\n 'Experiment Time': '%d.2 seconds' % time_elapsed,\n 'Unique Radiation': config.bigmacc.runrad}, index=[0]), ignore_index=True)\n log_df.to_csv(os.path.join(bigmacc_outputs_path, 'logger.csv'))\n log_df.to_csv(r\"C:\\Users\\justi\\Desktop\\126logger_backup.csv\", )\n\n # write netcdf of hourly_results\n netcdf_writer.main(config, time='hourly')\n\n if config.bigmacc.rerun != True:\n shutil.rmtree(locator.get_costs_folder())\n shutil.rmtree(locator.get_demand_results_folder())\n shutil.rmtree(locator.get_lca_emissions_results_folder())\n shutil.rmtree(locator.get_solar_radiation_folder())\n shutil.rmtree(locator.get_potentials_folder())\n else:\n print(' - Rerun does not require purging of the files.')\n\n # when the setpoint is changed it is in a deeper database than the archetypes mapper can reach so reset it here\n if keys[0] == 1:\n cea.datamanagement.data_initializer.main(config)\n else:\n pass\n print('END: experiment {}. 
\\n'.format(i))", "def test_resource_manager_on_driver():\n config = {\n \"workflow-name\": \"workflow\",\n \"cluster-type\": CLUSTER_TYPE,\n \n \"resource-manager\": {\n \"server\": \"driver\",\n \"port\": 4000,\n \"config\": {\n \"read_reqs\": 123,\n \"read_data\": 456,\n \"write_reqs\": 789,\n \"write_data\": 321\n }\n }\n }\n \n template_dir = tempfile.mkdtemp(suffix=\"test-resource-manager-on-driver-template\")\n with open(f\"{template_dir}/workflow.yaml\", 'w') as f:\n yaml.dump(config, f)\n \n @checkrun\n def execute(workflow_inst):\n client = ResourceManagerClient('127.0.0.1', 4000)\n mgr_config = client.read_config()\n assert mgr_config == config[\"resource-manager\"][\"config\"], \\\n \"Resource manager config does not match the one in the workflow config\"\n \n _execution_dir, _workflow = launch_flow(template_dir, 1, _custom_execute_fn=execute)\n assert execute.didrun\n \n # FIXME: For mysterious reasons, the check below does not work on Travis-CI.\n # Somehow, read_config() succeeds despite the fact that\n # the resource manager server was already terminated??\n if os.environ.get('TRAVIS', '') == 'true':\n pytest.skip(\"Skipping resource manager shutdown check on Travis-CI\")\n\n # Server should not be running any more after workflow exits.\n with pytest.raises(TimeoutError):\n client2 = ResourceManagerClient('127.0.0.1', 4000)\n client2.read_config()", "def provision(args):\n cfg_file = os.path.join(xbow.XBOW_CONFIGDIR, \"settings.yml\")\n\n with open(cfg_file, 'r') as ymlfile:\n cfg = yaml.safe_load(ymlfile)\n\n scheduler = get_by_name(cfg['scheduler_name'])\n if len(scheduler) == 0:\n raise ValueError('Error - cannot find the scheduler')\n elif len(scheduler) > 1:\n raise ValueError('Error - more than one scheduler found')\n workers = get_by_name(cfg['worker_pool_name'])\n if len(workers) == 0:\n print('Warning: no workers found')\n all_nodes = scheduler + workers\n all_cis = [ConnectedInstance(i) for i in all_nodes]\n with open(args.script, 'r') as f:\n for line in f:\n if len(line) > 0 and line[0] == '#':\n print(line[:-1])\n elif len(line) > 0 :\n command = line[:-1]\n if command.split()[0] != 'sudo':\n command = 'sudo ' + command\n print(command + ' : ', end='', flush=True)\n result = exec_all(all_cis, command)\n status = np.all(np.array(result) == 0)\n if status:\n print('OK')\n else:\n print('FAILED')\n for i in range(len(result)):\n if result[i] != 0:\n if i == 0:\n print('Error on scheduler:')\n else:\n print('Error on worker {}'.format(i-1))\n print(all_cis[i].output)\n break\n else:\n status = False\n print(line[:-1], ' : ERROR')\n break\n\n return status", "def setUp(self):\n self.spark, self.log, self.config = start_spark(app_name = \"test_etl_job\",\n files='configs/etl_config.json')", "def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()", "def test_run_experiment_locally(self) -> None:\n\n experiment = Experiment(\n name=\"torchx_booth_sequential_demo\",\n search_space=SearchSpace(parameters=self._parameters),\n optimization_config=OptimizationConfig(objective=self._objective),\n runner=self._runner,\n is_test=True,\n properties={Keys.IMMUTABLE_SEARCH_SPACE_AND_OPT_CONF: True},\n )\n\n scheduler = Scheduler(\n experiment=experiment,\n generation_strategy=(\n choose_generation_strategy(\n search_space=experiment.search_space,\n )\n ),\n options=SchedulerOptions(),\n )\n\n try:\n for _ in range(3):\n scheduler.run_n_trials(max_trials=2)\n\n # TorchXMetric 
always returns trial index; hence the best experiment\n # for min objective will be the params for trial 0.\n scheduler.report_results()\n except FailureRateExceededError:\n pass # TODO(ehotaj): Figure out why this test fails in OSS.\n # Nothing to assert, just make sure experiment runs.", "def test_run_started(self):", "def setup_run(args, config): \n\n token = jwtfile.read() # read the JWT so we can send it in the header\n api = config['API']\n if not args.cwl: # beginning of process\n # request to get available options\n hdrs = {'begin-setup': 'True', 'token': token}\n r = Request(api['setup-run-start'], headers=hdrs)\n try:\n resp = urlopen(r)\n # if marked as unverified, we must login first to get a new token\n except HTTPError as e:\n # TODO deal with plain 400\n if e.code in [401, 406]:\n print('Your token is unverified. Please log in for another token.')\n login(args, config) # trigger login method\n return\n else:\n print('Was expecting a 401 or 406, got a {}'.format(e.code))\n return\n # print out options to command line\n jsn = json.loads(resp.read().decode()).get('opts', None)\n print('\\nPlease select a CWL and job (.yml) file and re-run this command'\\\n ' with the `--cwl <cwl>` option:\\n')\n print('Available Options\\n----------------')\n for k, v in jsn.items():\n print('{}: {}'.format(k, v))\n return\n cwl_file = args.cwl # get the .cwl\n # ask for a job title so the sevrer can store this\n title = None\n while not title: # can't skip\n title = input('Please enter a title for the job you are creating: ')\n hdrs = {'cwl-input': 'True', 'cwl': cwl_file, 'token': token}\n pld = {'cwl': cwl_file, 'job_title': title}\n r = Request(api['setup-run-select-wkflow'], data=urlencode(pld).encode(), headers=hdrs, method='POST')\n try:\n resp = urlopen(r)\n # we expect a response to ask us questions\n except HTTPError as e:\n if e.getcode() in [401, 406]:\n print('Uh oh, looks like your token has expired. 
Please re-login.')\n elif e.getcode() == 404: # notfound\n print('A template couldn\\'t be properly generated for that Workflow.')\n else:\n print('Expected 401, 404, 406, got {}'.format(e.getcode()))\n return\n # invoke the questions prompt; iterate through each CWL key\n job_input_dict = {} # initialize empty dict to be updated\n # send the inputs back as JSON\n print('You requested the following Workflow: \\n')\n jsn = json.loads(resp.read().decode()) # bytes to str to dict\n wkflow = jsn.get('workflow', None)\n print(wkflow)\n print('\\n')\n _req = jsn.get('required') # dict, but only because we're using requests lib...\n _opt = jsn.get('optional')\n job_input_dict.update(ask_wkflow(_req, _opt))\n job_inputs = json.dumps(job_input_dict)\n d = {\n 'cwl': cwl_file, \n 'job_inputs': job_inputs,\n 'job_title': title, \n }\n h = {'token': token}\n r = Request(api['setup-run-job-input'], data=urlencode(d).encode(), headers=h, method='POST')\n try:\n resp = urlopen(r)\n except HTTPError as e:\n if e.getcode() in [401, 406]:\n print('Token expired; please re-login')\n else:\n print('Huh?')\n return\n jsn = json.loads(resp.read().decode())\n if jsn.get('errors', {}) == {}: # empty dict means no errors!\n print('Your JOB sucessfully validated.')\n else: # print all errors and ask person to do it again\n #print(r.json.get('errors'))\n print(jsn.get('errors'))\n return", "def run(ceph_cluster, **kwargs) -> int:\n log.info(\"Running RBD Sanity tests.\")\n\n config = kwargs[\"config\"]\n script_dir = config[\"script_path\"]\n script = config[\"script\"]\n\n nodes = config.get(\"nodes\", [])\n rhbuild = config.get(\"rhbuild\")\n\n if nodes:\n nodes = get_nodes_by_ids(ceph_cluster, nodes)\n else:\n # By default, tests would be executed on a single client node\n nodes = [ceph_cluster.get_nodes(role=\"client\")[0]]\n\n os_ver = rhbuild.split(\"-\")[-1]\n if \"4.\" in rhbuild and os_ver == \"8\":\n nodes[0].exec_command(\n cmd=\"sudo /usr/sbin/alternatives --set python /usr/bin/python3\"\n )\n\n if rhbuild[0] > \"4\":\n out, err = nodes[0].exec_command(\n sudo=True, cmd=\"ceph config get mon mon_allow_pool_delete --format json\"\n )\n\n if not json.loads(out):\n nodes[0].exec_command(\n sudo=True, cmd=\"ceph config set mon mon_allow_pool_delete true\"\n )\n\n for node in nodes:\n branch = config.get(\"branch\", get_tag(node))\n one_time_setup(node, rhbuild, branch=branch)\n\n cmd = f\"cd ceph/{script_dir}; sudo bash {script}\"\n if script == \"*\":\n cmd = f\"cd ceph/{script_dir}; for test in $(ls); do sudo bash $test; done\"\n\n node.exec_command(cmd=cmd, check_ec=True, timeout=1200)\n\n return 0", "def run_starter(self, expect_to_fail=False):", "def test_launch_deployment(self):\n pass", "def run_worker(self):\n # TODO(xiejw): To allow execution framework to add train hooks.\n return self._start_distributed_training()", "def main():\n rclpy.init()\n\n worker_id = int(sys.argv[1])\n policy_type = sys.argv[2]\n node = WorkerSync(worker_id, 'worker_node', policy_type)\n\n try:\n executor = MultiThreadedExecutor()\n steps = 0\n\n while rclpy.ok():\n if node.flag.pull:\n node.pull(executor)\n\n elif node.flag.collect:\n steps = node.collect()\n\n elif node.flag.compute:\n node.compute(steps)\n\n elif node.flag.push:\n experiment_complete = node.push(executor)\n node.upkeep()\n\n # End experiment if passed number of max episodes.\n if experiment_complete:\n node.test(100)\n break\n\n except KeyboardInterrupt:\n pass\n\n # Destroy the node explicitly\n node.destroy_node()\n rclpy.shutdown()", "def 
runFunc(runType):\n logger.info('Running test locally with development environment')\n runProcess('local', ['invoke', '-v', '--config-file', 'run_config.yaml'])", "def uq_ensemble(config=\"dummy_test\", script=\"ERROR: PARAMETER script SHOULD BE DEFINED FOR TASK UQ_ENSEMBLE\",**args):\n \n path_to_config = find_config_file_path(config)\n sweep_dir = path_to_config + \"/SWEEP\"\n env.script = script\n\n run_ensemble(config, sweep_dir, **args)", "def execute_experiment(self):\n protocol_name = self.protocol_config['protocol']\n number_of_repetitions = self.protocol_config['numOfRepetitions']\n configurations = self.protocol_config['configurations']\n working_directory = self.protocol_config['workingDirectory']\n executables = self.protocol_config['executableName']\n for i in range(number_of_repetitions):\n for idx2 in range(len(configurations)):\n for idx in range(len(executables)):\n os.system(f'fab -f Execution/fabfile.py run_protocol:{self.protocol_config_path},'\n f'{configurations[idx2]},{executables[idx]},{working_directory[idx]} --parallel | '\n f' tee WebApp/ExecutionLogs/{protocol_name}.log')", "def launch(\n key_name: str,\n size: int,\n master_type: str,\n worker_type: str,\n image_id: str,\n owner: str,\n bucket_name: str,\n worker_command: str,\n config: str,\n cluster_name: Optional[str],\n workers_per_machine: int\n):\n\n if cluster_name is None:\n # credit for the words_alpha.txt file https://github.com/dwyl/english-words\n cluster_name = random.choice([word for word in open(\"words_alpha.txt\")])[:-1]\n storage_name = cluster_name + '_' + datetime.now().strftime('%Y%m%d%H%M%S') # name of the file storage on s3\n head_tags, worker_tags = get_tags(owner, cluster_name, storage_name) # tags for head and workers\n\n print(f'Launching cluster named ------------ {cluster_name} --------------------- (storage_name: {storage_name})')\n print(f'---------------------------------------------------------------------------------------------------')\n\n ec2 = boto3.resource(\"ec2\")\n as_client = boto3.client('autoscaling')\n\n # compress and upload the source code to the s3\n repo_name = _compress_folder()\n filename = str(pathlib.Path.cwd().parent / TAR_NAME)\n print(f'Uploading {filename} to {storage_name}')\n up(bucket_name, storage_name, filename)\n # down(bucket_name, storage_name, filename) # just to check file available\n print(f'Upload finished')\n\n download_untar = f'rm -f /home/ubuntu/{TAR_NAME} && ' \\\n f'aws s3 cp s3://{bucket_name}/{storage_name} /home/ubuntu/{TAR_NAME} && ' + \\\n f'rm -rf /home/ubuntu/{repo_name} && ' + \\\n f'mkdir /home/ubuntu/{repo_name} && ' + \\\n f'tar -xvf /home/ubuntu/{TAR_NAME} -C /home/ubuntu/'\n\n head_command = 'python -u es/experiment.py with ' + config + ' local=False'\n master_script = make_master_script(download_untar, make_master_run_script(head_command), repo_name)\n\n print(f'master will run this: -------\\n{master_script}\\n--------------')\n\n master_instance = ec2.create_instances(\n ImageId=image_id,\n KeyName=key_name,\n InstanceType=master_type,\n MinCount=1,\n MaxCount=1,\n SecurityGroupIds=[DEFAULT_SECURITY_GROUP],\n UserData=master_script,\n # https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/user-data.html\n TagSpecifications=[{'ResourceType': 'instance', 'Tags': head_tags}],\n IamInstanceProfile={'Name': 'redis_cluster_code_access'},\n # EbsOptimized=True,\n # Tags=head_tags\n )[0]\n\n master_ip = master_instance.private_ip_address\n\n print(f'Master launched, IP is: {master_ip}')\n scaling_client = 
boto3.client(\"autoscaling\")\n\n # try deleting the auto-scaling group and launch configuration of given name (should be done in the manage/kill)\n try:\n _ = scaling_client.delete_auto_scaling_group(\n AutoScalingGroupName=cluster_name,\n ForceDelete=True,\n )\n print(f'Auto scaling group named {cluster_name} deleted')\n # time.sleep(1)\n except:\n print(f'auto scaling group not found, skipping deletion')\n try:\n _ = scaling_client.delete_launch_configuration(\n LaunchConfigurationName=cluster_name\n )\n # time.sleep(1)\n print(f'Launch fonfig named {cluster_name} deleted')\n except:\n print(f'launch config not found, not deleting')\n\n worker_command = worker_command + f' --num_workers={workers_per_machine}'\n worker_script = make_worker_script(download_untar, make_worker_run_script(master_ip, worker_command), repo_name)\n print(f'Worker will run this: -------\\n{worker_script}\\n--------------')\n print(f'Creating launch configuration..')\n\n config_resp = as_client.create_launch_configuration(\n ImageId=image_id,\n KeyName=key_name,\n InstanceType=worker_type,\n LaunchConfigurationName=cluster_name,\n SecurityGroups=[DEFAULT_SECURITY_GROUP],\n UserData=worker_script,\n IamInstanceProfile=REDIS_CLUSTER_CODE_ACCESS,\n # EbsOptimized=True,\n )\n assert config_resp[\"ResponseMetadata\"][\"HTTPStatusCode\"] == 200\n\n print(f'Creating auto scaling group..')\n\n asg_resp = as_client.create_auto_scaling_group(\n AutoScalingGroupName=cluster_name,\n LaunchConfigurationName=cluster_name,\n MinSize=size,\n MaxSize=size,\n DesiredCapacity=size,\n AvailabilityZones=AVAILABILITY_ZONES,\n Tags=worker_tags,\n )\n assert asg_resp[\"ResponseMetadata\"][\"HTTPStatusCode\"] == 200\n\n print(f'\\nCluster created, name: {cluster_name}\\n')", "def main() -> bool:\n global logger\n logger = setup_logger(\"nitpycker\")\n plugin_manager = Manager()\n plugin_manager.load_plugins()\n args = parse_args(plugin_manager)\n if plugin_manager.enable_plugins(args.plugins, args):\n exit(2)\n\n plugin_manager.pre_test_discovery()\n tests = unittest.defaultTestLoader.discover(args.start_directory, pattern=args.pattern)\n plugin_manager.post_test_discovery()\n tests = plugin_manager.filter_tests(tests)\n report = ParallelRunner(plugin_manager, process_number=args.process_number, verbosity=args.verbosity).run(tests)\n return not report.wasSuccessful()" ]
[ "0.6706494", "0.6451912", "0.62901366", "0.6208054", "0.6111138", "0.59998226", "0.59990007", "0.59590906", "0.5952131", "0.5922806", "0.5853597", "0.579472", "0.57890517", "0.57299614", "0.5724651", "0.5720135", "0.57154197", "0.57125825", "0.5700395", "0.5678336", "0.5675108", "0.5670209", "0.566518", "0.5664975", "0.5656063", "0.5654081", "0.5638076", "0.56345034", "0.56298196", "0.5617696" ]
0.70054567
0
You can provide an initialization script for each worker to call before the workflow starts. The most common use case for such a script is to launch a local dvid server on each worker (for posting in parallel to the cloud). We provide the necessary script for local dvid workers out-of-the-box, in scripts/worker-dvid. This test verifies that it works.
def test_worker_dvid_initialization(): repo_dir = Path(flyemflows.__file__).parent.parent template_dir = tempfile.mkdtemp(suffix="test-worker-dvid") # Copy worker script/config into the template shutil.copy(f'{repo_dir}/scripts/worker-dvid/dvid.toml', f'{template_dir}/dvid.toml') shutil.copy(f'{repo_dir}/scripts/worker-dvid/launch-worker-dvid.sh', f'{template_dir}/launch-worker-dvid.sh') config = { "workflow-name": "workflow", "cluster-type": CLUSTER_TYPE, "worker-initialization": { "script-path": "launch-worker-dvid.sh", "only-once-per-machine": True, "script-args": ["_TEST_SCRIPT_FAKE_ARG_"], # This is just here to make it easy to identify the process "launch-delay": 1.0 } } with open(f"{template_dir}/workflow.yaml", 'w') as f: yaml.dump(config, f) def is_worker_dvid_running(): return len(find_processes('_TEST_SCRIPT_FAKE_ARG_')) > 0 @checkrun def execute(workflow_inst): script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent assert is_worker_dvid_running(), f"Worker DVID is not running. Check logs in:\n{script_dir}" _execution_dir, workflow_inst = launch_flow(template_dir, 1, _custom_execute_fn=execute) script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent assert not is_worker_dvid_running(), \ ("Worker DVID remained running after the workflow exited."\ f"Check logs in:\n{script_dir}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_worker_initialization(setup_worker_initialization_template):\n template_dir, _config, once_per_machine = setup_worker_initialization_template\n \n num_workers = 2\n if once_per_machine or CLUSTER_TYPE in (\"synchronous\", \"processes\"):\n expected_script_count = 1\n else:\n expected_script_count = num_workers\n \n @checkrun\n def execute(workflow_inst):\n script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent\n script_count = len(find_processes('_TEST_SCRIPT_FAKE_ARG_'))\n assert script_count > 0, f\"Worker script is not running. Check logs in:\\n{script_dir}\"\n assert script_count <= expected_script_count, f\"Worker script started too many times. Check logs in:\\n{script_dir}\"\n assert script_count == expected_script_count, f\"Worker script not started on all workers. Check logs in:\\n{script_dir}\"\n \n _execution_dir, workflow_inst = launch_flow(template_dir, num_workers, _custom_execute_fn=execute)\n script_dir = Path(workflow_inst.config['worker-initialization']['script-path']).parent\n script_count = len(find_processes('_TEST_SCRIPT_FAKE_ARG_'))\n\n assert script_count == 0, \\\n (\"Worker script(s) remained running after the workflow exited.\"\\\n f\"Check logs in:\\n{script_dir}\")", "def setup_worker_initialization_template(request):\n once_per_machine = request.param\n template_dir = tempfile.mkdtemp(suffix=\"test-worker-initialization\")\n\n worker_script = f\"{template_dir}/do-nothing.sh\"\n with open(worker_script, 'w') as f:\n f.write(\"#!/bin/bash\\n\")\n f.write(\"sleep 10\")\n os.chmod(worker_script, 0o777)\n \n config = {\n \"workflow-name\": \"workflow\",\n \"cluster-type\": CLUSTER_TYPE,\n \n \"worker-initialization\": {\n \"script-path\": \"do-nothing.sh\",\n \"only-once-per-machine\": once_per_machine,\n \"script-args\": [\"_TEST_SCRIPT_FAKE_ARG_\"], # This is just here to make it easy to identify the process\n \"launch-delay\": 0\n }\n }\n \n with open(f\"{template_dir}/workflow.yaml\", 'w') as f:\n yaml.dump(config, f)\n\n return template_dir, config, once_per_machine", "def init_worker(self, worker_id) :\n\n # since this is called in a separate process,\n # we need to get a consistent view of the settings\n startup.main(self.mode, self.rank)\n\n # initialize the random seed for this process\n # we don't use just the worker_id but also the rank\n # so we truly get different random numbers in all workers,\n # not restricted to the current pool\n # note that we get some entropy from the time\n # so different epochs get different data augmentations\n np.random.seed((hash(time())\n + (settings.RANK * torch.utils.data.get_worker_info().num_workers\n + worker_id)) % 2**32)", "def init_worker(*shared_args_list):\n global SHARED_ARGS\n SHARED_ARGS = shared_args_list", "def initialize(self,init):\n logger.info('*** initialize: worker id=%d',self._agent.wid)\n self.commands = {'initialize':None, 'before_do_work':None, 'after_do_work':None, 'finalize':None}\n self.commands.update(init.get(self._agent.wid,{}))\n exec_command(self.commands['initialize'])", "def worker_init_fn(worker_id: int) -> None:\n worker_info = torch.utils.data.get_worker_info()\n set_rnd(worker_info.dataset, seed=worker_info.seed) # type: ignore[union-attr]", "def worker_init_fn(worker_id):\n worker_info = torch.utils.data.get_worker_info() # type: ignore\n if hasattr(worker_info.dataset, \"transform\") and hasattr(worker_info.dataset.transform, \"set_random_state\"):\n worker_info.dataset.transform.set_random_state(worker_info.seed % (2 ** 32))", "def 
worker_init_fn(self, worker_id: int) -> None:\n np.random.seed(np.random.get_state()[1][0] + worker_id + random.randint(1, 1000))\n\n worker_info = torch.utils.data.get_worker_info()\n worker_info.dataset.set_worker_id(worker_id)\n worker_info.dataset.examples, shard_stats = self.get_worker_shard(\n worker_info.dataset.examples, worker_info.num_workers, worker_id\n )\n worker_info.dataset.logger.info(\n f\"Stats for shard created for worker {worker_id}: \\n {shard_stats}\"\n )\n worker_info.dataset.create_language_index_mapping()", "def _init_workloads(self):\n php_cgi = '/usr/bin/php-cgi'\n workloads = []\n\n # Workloads served with node.js\n workloads.append(nodejs.NodeJS('yahvp', 'Yet Another HTML5 Video Player',\n [Arg('--port', '7000')]))\n\n return workloads", "def worker_init_fn(worker_id):\r\n base_seed = torch.IntTensor(1).random_().item()\r\n #print(worker_id, base_seed)\r\n np.random.seed(base_seed + worker_id)", "def init_workers():\n party_queue = Queue()\n p = Producer(party_queue)\n p.daemon = True\n c = Consumer(party_queue)\n c.deamon= True\n m = MasterUpdater(db,application_name)\n m.deamon = True\n p.start()\n c.start()\n m.start()", "def worker_init_fn(worker_id):\n np.random.seed(np.random.get_state()[1][0] + worker_id)", "def worker_init_fn(worker_id):\n np.random.seed(np.random.get_state()[1][0] + worker_id)", "def setup_run(args, config): \n\n token = jwtfile.read() # read the JWT so we can send it in the header\n api = config['API']\n if not args.cwl: # beginning of process\n # request to get available options\n hdrs = {'begin-setup': 'True', 'token': token}\n r = Request(api['setup-run-start'], headers=hdrs)\n try:\n resp = urlopen(r)\n # if marked as unverified, we must login first to get a new token\n except HTTPError as e:\n # TODO deal with plain 400\n if e.code in [401, 406]:\n print('Your token is unverified. Please log in for another token.')\n login(args, config) # trigger login method\n return\n else:\n print('Was expecting a 401 or 406, got a {}'.format(e.code))\n return\n # print out options to command line\n jsn = json.loads(resp.read().decode()).get('opts', None)\n print('\\nPlease select a CWL and job (.yml) file and re-run this command'\\\n ' with the `--cwl <cwl>` option:\\n')\n print('Available Options\\n----------------')\n for k, v in jsn.items():\n print('{}: {}'.format(k, v))\n return\n cwl_file = args.cwl # get the .cwl\n # ask for a job title so the sevrer can store this\n title = None\n while not title: # can't skip\n title = input('Please enter a title for the job you are creating: ')\n hdrs = {'cwl-input': 'True', 'cwl': cwl_file, 'token': token}\n pld = {'cwl': cwl_file, 'job_title': title}\n r = Request(api['setup-run-select-wkflow'], data=urlencode(pld).encode(), headers=hdrs, method='POST')\n try:\n resp = urlopen(r)\n # we expect a response to ask us questions\n except HTTPError as e:\n if e.getcode() in [401, 406]:\n print('Uh oh, looks like your token has expired. 
Please re-login.')\n elif e.getcode() == 404: # notfound\n print('A template couldn\\'t be properly generated for that Workflow.')\n else:\n print('Expected 401, 404, 406, got {}'.format(e.getcode()))\n return\n # invoke the questions prompt; iterate through each CWL key\n job_input_dict = {} # initialize empty dict to be updated\n # send the inputs back as JSON\n print('You requested the following Workflow: \\n')\n jsn = json.loads(resp.read().decode()) # bytes to str to dict\n wkflow = jsn.get('workflow', None)\n print(wkflow)\n print('\\n')\n _req = jsn.get('required') # dict, but only because we're using requests lib...\n _opt = jsn.get('optional')\n job_input_dict.update(ask_wkflow(_req, _opt))\n job_inputs = json.dumps(job_input_dict)\n d = {\n 'cwl': cwl_file, \n 'job_inputs': job_inputs,\n 'job_title': title, \n }\n h = {'token': token}\n r = Request(api['setup-run-job-input'], data=urlencode(d).encode(), headers=h, method='POST')\n try:\n resp = urlopen(r)\n except HTTPError as e:\n if e.getcode() in [401, 406]:\n print('Token expired; please re-login')\n else:\n print('Huh?')\n return\n jsn = json.loads(resp.read().decode())\n if jsn.get('errors', {}) == {}: # empty dict means no errors!\n print('Your JOB sucessfully validated.')\n else: # print all errors and ask person to do it again\n #print(r.json.get('errors'))\n print(jsn.get('errors'))\n return", "def test_setup_sync(self):\n worker_helper = WorkerHelper()\n self.assertEqual(worker_helper.setup(), None)", "def worker_init_fn(worker_id, num_workers, rank, seed):\n\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)", "def worker_init_fn(worker_id, num_workers, rank, seed):\n\n worker_seed = num_workers * rank + worker_id + seed\n np.random.seed(worker_seed)\n random.seed(worker_seed)", "def __init__(self, run, expname):\n logger.debug('Initializing worker {}.'.format(rank))\n self.run = int(run)\n self.expname = expname\n bcast_var = None\n dsname = comm.bcast(bcast_var, root=0)\n print(dsname)\n \n print('********** Start setup.')\n t0 = time.time()\n self.dsIdx = psana.DataSource(str(dsname))\n logger.info('********** Datasource on rank {}: {}s'.format(rank, time.time()-t0))\n self.dsIdxRun = next(self.dsIdx.runs())\n self.parse_detectors()\n logger.info('Rank {} has datasource and detectors.'.format(rank))\n print('********** Setup on rank {}: {}s'.format(rank, time.time()-t0))\n return", "def init_workers(dist_mode):\n if dist_mode == 'ddp-file':\n from distributed.torch import init_workers_file\n return init_workers_file()\n elif dist_mode == 'ddp-mpi':\n from distributed.torch import init_workers_mpi\n return init_workers_mpi()\n elif dist_mode == 'cray':\n from distributed.cray import init_workers_cray\n return init_workers_cray()\n return 0, 1", "def init_workers(dist_mode):\n if dist_mode == 'ddp-file':\n from distributed.torch import init_workers_file\n return init_workers_file()\n elif dist_mode == 'ddp-mpi':\n from distributed.torch import init_workers_mpi\n return init_workers_mpi()\n elif dist_mode == 'cray':\n from distributed.cray import init_workers_cray\n return init_workers_cray()\n return 0, 1", "def setUp(self) :\n self.longMessage = True\n logger = corAna.makeLogger(isTestMode=True,isMaster=True,isViewer=True,isServer=True,rank=0)\n isFirstWorker = True\n self.numTimes = 5\n numDataPointsThisWorker = 1\n\n self.workerData = corAna.WorkerData(logger, isFirstWorker, self.numTimes,\n numDataPointsThisWorker, addRemoveCallbackObject = None)", "def 
main():\n rclpy.init()\n\n worker_id = int(sys.argv[1])\n policy_type = sys.argv[2]\n node = WorkerSync(worker_id, 'worker_node', policy_type)\n\n try:\n executor = MultiThreadedExecutor()\n steps = 0\n\n while rclpy.ok():\n if node.flag.pull:\n node.pull(executor)\n\n elif node.flag.collect:\n steps = node.collect()\n\n elif node.flag.compute:\n node.compute(steps)\n\n elif node.flag.push:\n experiment_complete = node.push(executor)\n node.upkeep()\n\n # End experiment if passed number of max episodes.\n if experiment_complete:\n node.test(100)\n break\n\n except KeyboardInterrupt:\n pass\n\n # Destroy the node explicitly\n node.destroy_node()\n rclpy.shutdown()", "def main():\n tester = Tester()\n # parse args, load configuration and create all required objects.\n tester.setup_experiment()\n # GO!\n tester.run_experiment()", "def init_processes(rank, run_id, hosts, backend='gloo'):\n hosts = hosts.split(',')\n os.environ['MASTER_ADDR'] = hosts[0] # first worker is the master worker\n os.environ['MASTER_PORT'] = '29500'\n world_size = len(hosts)\n os.environ['WORLD_SIZE'] = str(world_size)\n os.environ['RANK'] = str(rank)\n dist.init_process_group(backend, rank=rank, world_size=world_size)\n run(rank, world_size, run_id)", "def create_worker(num_worker, server_ip, server_port):\n for i in range(int(num_worker)):\n print \"-- worker initializing --\"\n dask_server = Worker('tcp://'+server_ip+\":\"+str(server_port), loop=loop)\n dask_server.start()", "def run_worker(self):\n # TODO(xiejw): To allow execution framework to add train hooks.\n return self._start_distributed_training()", "def init_distributed(backend, world_size, rank, checkpoint_dir):\n # multi-gpu initial\n logger.debug(f'Initializing {world_size} workers')\n # Remove the init file from previous version\n init_dir = checkpoint_dir / 'shared_distributed'\n if init_dir.is_file():\n rm_file(init_dir)\n\n init_dir.mkdir(parents=True, exist_ok=True)\n init_file = init_dir / f'slurm-{slurm.job_id}'\n init_method = init_file.resolve().as_uri()\n dist.init_process_group(backend, world_size=world_size, rank=rank, init_method=init_method)\n logger.debug('Init finished')", "def _problem_run_experiments_initialise(self):\n pass", "def SetUp(self):\n parser = argparse.ArgumentParser()\n parser.add_argument('--task-hash')\n args, _ = parser.parse_known_args()\n\n self.task = self.CreateNewTask(\n isolated_hash=args.task_hash,\n dimensions={'os': 'Ubuntu-14.04'},\n idle_timeout_secs=90, connection_timeout_secs=90,\n verbosity=logging.DEBUG)\n self.task.Create()\n self.task.WaitForConnection()", "def test_training():\n assert init_engine('train', [\"config=first_run_test/default.yaml\"]).run() is None" ]
[ "0.7326153", "0.70049655", "0.6345922", "0.6308314", "0.6258606", "0.6253948", "0.6138363", "0.6117165", "0.61046404", "0.60405153", "0.6030067", "0.6017148", "0.6017148", "0.60138994", "0.5929832", "0.5922357", "0.5922357", "0.5841495", "0.5836372", "0.5836372", "0.5832595", "0.581463", "0.5772954", "0.5771031", "0.575313", "0.56777894", "0.56747687", "0.5659891", "0.5640645", "0.5577385" ]
0.7815065
0
Return the next power of 10
def nextpow10(n): if n == 0: return 0 else: return math.ceil(math.log10(abs(n)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next_pow_two(n):\n i = 1\n while i < n:\n i = i << 1\n return i", "def _next_power_of_two(self, n):\n if n == 0:\n return 1\n return int(2 ** math.ceil(math.log2(n)))", "def next_power2(num):\n return 2 ** int(np.ceil(np.log2(num)))", "def next_power_2(x: int) -> int:\n return 0 if x < 1 else shift_left_bit_length(x)", "def nextpow2(i):\n n = 1\n while n < i:\n n *= 2\n return n", "def _next_power_of_2(x):\n return 1 if x == 0 else 2**(x - 1).bit_length()", "def non_recursive_power(base, power):\n result = 1\n i = 0\n while i < power:\n result = result * base\n i = i+1\n\n return result", "def nextpow2(x):\n return int(numpy.ceil(numpy.log2(numpy.abs(x))))", "def nextPowerOf2(n):\n count = 0; \n \n # First n in the below \n # condition is for the \n # case where n is 0 \n if (n and not(n & (n - 1))): \n return n \n \n while( n != 0): \n n >>= 1\n count += 1\n \n return 1 << count;", "def recursive_power(base, power):\n if power == 0:\n return 1\n else:\n return base*recursive_power(base, power-1)", "def self_powers():\n return sum([i ** i for i in range(1, 1001)]) % (10 ** 10)", "def nextpow2(x):\n log2_n = math.ceil(math.log2(x))\n n = 2 ** log2_n\n return n", "def next_p2(num):\n rval = 1\n while rval < num:\n rval <<= 1\n return rval", "def _pow_(self, n):\n assert n > 0\n return generic_power(self, n)", "def next_p2 (num):\n rval = 1\n while rval<num:\n rval <<= 1\n return rval", "def nextpow2(n):\n m_f = np.log2(n)\n m_i = np.ceil(m_f)\n return np.int(2 ** m_i)", "def power(x, n):\n power = 1\n for i in range(abs(n)):\n power = multiply(power, x) \n return power", "def power(base, exp):\n\tans = [1]\n\twhile exp > 0:\n\t\tcarry = 0\n\t\tfor i in xrange(len(ans)):\n\t\t\tmult = ans[i] * base + carry\n\t\t\tans[i] = mult % 10\n\t\t\tcarry = mult / 10\n\t\twhile carry > 0:\n\t\t\tans.append(carry % 10)\n\t\t\tcarry /= 10\n\t\texp -= 1\n\treturn ans", "def power(number, power):\n return math.pow(number, power)", "def __pow__(self,n):\r\n\t\t\r\n\t\t# take power\r\n\t\tp = self.power(n)\r\n\t\t\r\n\t\treturn p", "def _find_nearest_power_of_two(x):\n\n return 1 << (x - 1).bit_length()", "def nextpow2(longitud_malla):\r\n n = 1\r\n while n < longitud_malla: n *= 2\r\n return n", "def improve_power(x):\r\n for i in range(2,base(x)//2+1):\r\n if(base(x)%i==0):\r\n temp=base(x)\r\n n=0\r\n flag=True\r\n while(temp>1):\r\n if(temp%i!=0):\r\n flag=False\r\n break\r\n else:\r\n temp=temp/i\r\n n=n+1\r\n if (flag):\r\n return(make_power(i,n*power(x)))\r\n return (make_power(x(0), x(1)))", "def nextpow2(x):\n return np.ceil(np.log2(np.abs(x)))", "def next_po2(n) -> int:\n if not n:\n return 1\n if is_po2(n):\n # n is a power of 2\n return n\n return 1 << (n - 1).bit_length()", "def problem_48():\n\n return int(str(sum(x**x for x in range(1, 1001)))[-10:])", "def power(x, n):\n value = 1\n for i in range(n):\n value = multiply(value, x)\n return value", "def power(x, n):\n if n == 0:\n return 1\n result = power(x, math.floor(n / 2))\n if n % 2 > 0:\n return x * result * result\n else:\n return result * result", "def prevpow2(i):\n n = 1\n while 2*n <= i: n *= 2\n return n", "def power(base, exponent):\n return base ** exponent" ]
[ "0.7424253", "0.73961514", "0.7280163", "0.7175434", "0.713948", "0.71352744", "0.69893503", "0.6905554", "0.68357366", "0.67452294", "0.6701021", "0.6685439", "0.6657409", "0.6649841", "0.66056854", "0.658991", "0.6583599", "0.656067", "0.6541835", "0.6518142", "0.6472119", "0.6443989", "0.6443932", "0.64288723", "0.638601", "0.6369569", "0.6360769", "0.63479626", "0.63443714", "0.63408923" ]
0.8007733
0
Return a number that looks 'nice', with a maximum error
def magicnr(value, error): magics = [ (10 ** (nextpow10(error))), (10 ** (nextpow10(error))) / 2.0, (10 ** (nextpow10(error))) / 4.0, (10 ** (nextpow10(error))) / 10.0, (10 ** (nextpow10(error))) / 20.0, (10 ** (nextpow10(error))) / 40.0, (10 ** (nextpow10(error))) / 100.0, ] magics.sort() magics.reverse() magic = magics[-1] for n in magics: if n < abs(value): magic = n break return fround(value, magic)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_precision(err):\n return max(0, int(-math.log10(2 * err)) + 1)", "def computeGoodMax(totalTimes, noerrs):\n # Could allow a small amount of space above the top, but it's annnoying for percentages!\n # return None\n factor = 1.00\n maxReading = factor * max(\n [max([v for v in l if v != None]) for l in list(totalTimes.values())]\n )\n if maxReading == 0:\n maxReading = 0.1\n decade = math.floor(math.log10(maxReading))\n scaledValue = maxReading * 10 ** (-decade)\n # print (\"maxReading: \",maxReading,\"decade: \",decade,\" scaledValue: \",scaledValue)\n for v in (\n 1.0,\n 1.1,\n 1.2,\n 1.25,\n 1.3,\n 1.4,\n 1.5,\n 1.6,\n 1.7,\n 1.75,\n 1.8,\n 1.9,\n 2.0,\n 2.5,\n 3.0,\n 4.0,\n 5.0,\n 6.0,\n 7.0,\n 7.5,\n 8.0,\n 9.0,\n ):\n if scaledValue <= v:\n # print (\"computeGoodMax: \", v * (10**decade))\n return v * (10 ** decade)\n # print (\"computeGoodMax: \", 10**(decade+1))\n return 10 ** (decade + 1)", "def enlarge(n):\r\n return n * 100", "def safe_calc(exponent):\n\n if exponent > 700:\n return sys.float_info.max\n else:\n return math.exp(exponent)", "def ensure_size(value):\n return int(round(value * 1.0 / base)) * base", "def ghmult_plain(x: int) -> str:\n mult = x / 10000\n if int(mult) == mult:\n mult = int(mult)\n return '{}'.format(mult)", "def ghmult(x: int) -> str:\n mult = x / 10000\n if int(mult) == mult:\n mult = int(mult)\n return '%sx' % mult", "def get_m(self, n, err):\n m = (n * -log2(err))/log(2)\n return int(m)", "def native_max_value(self) -> float:\n return 9", "def enlarge(n):\n\n return n* 100", "def enlarge(n):\n return n*100", "def pickNarrow(length):\n return(int(np.ceil(np.log10(length))))", "def fail_max(self) -> int:\n return self._fail_max", "def MakeHumanReadable(num):\n i = 0\n while i+1 < len(EXP_STRINGS) and num >= (2 ** EXP_STRINGS[i+1][0]):\n i += 1\n rounded_val = round(float(num) / 2 ** EXP_STRINGS[i][0], 2)\n return '%s %s' % (rounded_val, EXP_STRINGS[i][1])", "def _nice(x, round=False):\n if x <= 0:\n import warnings\n warnings.warn(\"Invalid (negative) range passed to tick interval calculation\")\n x = abs(x)\n expv = floor(log10(x))\n f = x / pow(10, expv)\n if round:\n if f < 1.75:\n nf = 1.0\n elif f < 3.75:\n nf = 2.5\n elif f < 7.0:\n nf = 5.0\n else:\n nf = 10.0\n else:\n if f <= 1.0:\n nf = 1.0\n elif f <= 2.5:\n nf = 2.5\n elif f <= 5.0:\n nf = 5.0\n else:\n nf = 10.0\n return nf * pow(10, expv)", "def max_error(self) -> float:\n return float(np.max(np.abs(self._flattened_errors())))", "def max_temp(self):\n return 99", "def problem_48():\n\n return int(str(sum(x**x for x in range(1, 1001)))[-10:])", "def err_func(x,rv,valore,specn,lcrop,models='da2014'):\n tmp = tmp_func(x[0], x[1], rv, specn, lcrop, models)\n if tmp != 1: return abs(tmp[3]-(valore+1.)) #this is quantity that gets minimized \n else: return 1E30", "def mamajek08_logRpHK_max():\n return -3.8918287373004357", "def normexponent(val):\n n = np.log10(val)\n if n < 0:\n n = int(n) - 1\n else:\n n = int(n)\n return n", "def maxim(self) -> (int, float('inf')):\n\t\treturn 2", "def test_to_knx_max_exceeded(self):\n with self.assertRaises(ConversionError):\n DPTValue1Ucount().to_knx(DPTValue1Ucount.value_max + 1)", "def humanvalue(self, value):\n if value > 1024 * 1024 * 1024:\n return \"%d\" % (value / 1024 / 1024 / 1024 / 1024)\n if value > 1024 * 1024:\n return \"%d\" % (value / 1024 / 1024)\n if value > 1024:\n return \"%d\" % (value / 1024 / 1024)", "def time_to_failure():\n return int(random.expovariate(BREAK_MEAN))\n #return MTBF", "def max_pp(level):\n base_pp = 6\n 
level_pp = 2 * level\n return base_pp + (level_pp - 2)", "def MINIMUM_BET() -> int:\n return 10", "def rand_uni_val() -> float:\n return random.uniform(0, 1)", "def native_max_value(self) -> float:\n return TEMP_MAXIMUM", "def calc_max_level(num_point):\n return int(numpy.ceil(numpy.log2(num_point)))" ]
[ "0.684441", "0.6396268", "0.620864", "0.6083352", "0.6061179", "0.6019383", "0.601921", "0.5970972", "0.5961703", "0.59382796", "0.5932441", "0.592237", "0.5899084", "0.5895729", "0.58700436", "0.58562726", "0.5854558", "0.58429986", "0.58234286", "0.5815658", "0.58116823", "0.58086294", "0.5805074", "0.57641387", "0.5745738", "0.5738901", "0.57242614", "0.57122165", "0.57103026", "0.5708048" ]
0.66190135
1
Get the path to a CSV by name.
def _get_csv_path(name): return os.path.join(cwd, 'output/app_info', name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def csv_path(name):\n return \"./data/%s\" % name", "def csv_dir(self):\n return op.join(self.root_dir, 'csv')", "def get_cached_csv(self, category: str) -> str:\n csv_path = f\"{self.csv_dir}/{category.lower()}.csv\"\n if path.exists(csv_path):\n return csv_path\n raise FileNotFoundError(f\"There is no {category.lower()} CSV written yet.\")", "def get_csv_path(url, destination):\n datafile_path = get_datafile_path(url, destination)\n return os.path.splitext(datafile_path)[0] + '-processed.csv'", "def get_csv(\n self,\n csv_name: str,\n csv_directory: Optional[str] = None,\n csv_output_name: Optional[str] = None,\n graph_type: Optional[str] = \"instance\",\n graph_id: Optional[str] = \"main\",\n ):\n self._check_connection()\n options = {}\n if csv_directory is None:\n csv_directory = os.getcwd()\n if csv_output_name is None:\n csv_output_name = csv_name\n options[\"name\"] = csv_name\n\n result = self._dispatch(\n \"get\",\n self._csv_url(graph_type, graph_id),\n options,\n )\n\n stream = open(f\"{csv_directory}/{csv_output_name}\", \"w\")\n stream.write(result)\n stream.close()", "def csvPathname(self, scenario, baseline=None, outputDir='.', type=RESULT_TYPE_SCENARIO):\n # Output files are stored in the output dir with same name as query file but with 'csv' extension.\n basename = os.path.basename(self.queryFile)\n mainPart, extension = os.path.splitext(basename)\n middle = scenario if type == RESULT_TYPE_SCENARIO else (\"%s-%s\" % (scenario, baseline))\n csvFile = \"%s-%s.csv\" % (mainPart, middle)\n csvPath = os.path.abspath(os.path.join(outputDir, csvFile))\n return csvPath", "def get_loc_year_csv(csv_name):\n fname = (csv_name.split('.'))[0].split('-')\n return fname[0], fname[1]", "def get_curve_path(curves_dir, star_id):\n curve_file = \"%s.csv\" % star_id\n curve_path = path.join(curves_dir, curve_file)\n\n return curve_path", "def sample_data_path(name):\n import os.path as op\n data_dir = op.join(op.dirname(__file__), \"data\")\n data_path = op.join(data_dir, name + \".csv\")\n return op.abspath(data_path)", "def data_characterization_path(experiment_name: str, iteration: int) -> Path: # pragma: no cover\n iteration_csv: str = f\"{iteration_name(iteration)}.csv\"\n return data_characterization_dir(experiment_name) / iteration_csv", "def symbol_to_path(symbol, base_dir=None):\n if base_dir is None:\n base_dir = os.path.dirname(os.path.realpath(__file__))\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def get_path(name: str) -> str:\n return _pooch.fetch(name)", "def get_path(name: str) -> str:\n return _pooch.fetch(name)", "def symbol_to_path(symbol, base_dir=\"data\"):\n return os.path.join(base_dir,\"{}.csv\".format(str(symbol)))", "def get_csv_in_path(self, path):\n files = os.listdir((path))\n return files", "def symbol_to_path(symbol, base_dir=\"../data\"):\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def symbol_to_path(symbol, base_dir=\"../data\"):\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def symbol_to_path(symbol, base_dir=\"../data\"):\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def get_csv_file_name(output_dir, file_prefix, file_suffix):\n\tcsv_filename = \"\".join([file_prefix, '_', file_suffix, '.csv'])\n\treturn os.path.join(output_dir, csv_filename)", "def get_items_path() -> Path:\n return Path(os.path.join(Path(os.path.realpath(__file__)).parent, \"items.csv\"))", "def symbol_to_path(symbol, base_dir=\"data\"):\n return os.path.join(base_dir, 
\"{}.csv\".format(str(symbol)))", "def symbol_to_path(symbol, base_dir=\"data\"):\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def symbol_to_path(symbol, base_dir= proj_path + '/data/'): \n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def pathfinder(Input):\n while True:\n if Input[-4::] == '.csv':\n return Input\n else:\n Input = input('Please enter a valid csv file: ')", "def symbol_to_path(symbol, base_dir=\"data\"):\r\n return os.path.join(base_dir, \"{}.csv\".format(str(symbol)))", "def data_abex_input_path(experiment_name: str, iteration: int) -> Path: # pragma: no cover\n iteration_csv: str = f\"{iteration_name(iteration)}.csv\"\n return data_abex_input_dir(experiment_name) / iteration_csv", "def csv_file(input_file):\n\n current_dir = os.getcwd()\n directory_name = current_dir + '\\\\' + 'data' + '\\\\'\n csv_out = directory_name + input_file\n return csv_out", "def get_descendant_file_path(parent_path):\n csv_relative_path = []\n for root, dirs, files in os.walk(parent_path):\n for file in files:\n words = file.split(r'.')\n if words[-1] == 'csv':\n file_path = os.path.join(parent_path, file)\n csv_relative_path.append(file_path)\n return csv_relative_path", "def upload_csv_to_drive(csv_path: str, csv_name: str, folder_id: Optional[str] = None) -> str:\n if folder_id:\n csv_metadata = {'name': csv_name,\n 'parents': [folder_id]}\n else:\n csv_metadata = {'name': csv_name}\n\n csv_file = Path(f\"{csv_path}/{csv_name}\")\n media = MediaFileUpload(csv_file,\n mimetype='text/csv')\n file = drive_service().files().create(body=csv_metadata,\n media_body=media,\n fields='id').execute()\n\n return file.get('id')", "def load_csv(csv_path):\n\n try:\n # Tries to read .csv file into a dataframe\n csv = pd.read_csv(csv_path, header=None)\n\n except FileNotFoundError as e:\n # If file is not found, handle the exception and exit\n logger.error(e)\n raise\n\n return csv" ]
[ "0.82250005", "0.64448506", "0.6438859", "0.638522", "0.635775", "0.62658775", "0.6196743", "0.60490125", "0.60278124", "0.5936994", "0.59059805", "0.58804125", "0.58804125", "0.58078647", "0.5763088", "0.5756751", "0.5756751", "0.5756751", "0.5755957", "0.57534754", "0.5739853", "0.5739853", "0.5702425", "0.56916463", "0.5674672", "0.55689496", "0.55672044", "0.5558292", "0.54959595", "0.5495428" ]
0.7574934
1
Get the app's name.
def _get_app_name(app): return app[APP_NAME_KEY]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_name():\n return config.APP_NAME", "def app_name(self) -> str:\n return self._app_name", "def app_name(self):\n return self._app_name", "def app_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"app_name\")", "def get_app_name(self):\n return getattr(self, '_app_name', None)", "def get_name(self, name):\n return self.apps[name]['name']", "def app_name(self): # pylint:disable=function-redefined\n return self._app_name", "def app_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_name\")", "def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")", "def getApplicationName(self) -> unicode:\n ...", "def application_name(self) -> Optional[str]:\n return pulumi.get(self, \"application_name\")", "def _get_app_name(self):\n # TODO move app name into pyglet.app (also useful for OS X menu bar?).\n return sys.argv[0]", "def app_name(self):\n module_filepath = inspect.getfile(type(self))\n parent_dir = os.path.dirname\n app_dirpath = parent_dir(parent_dir(parent_dir(module_filepath)))\n app_name = os.path.basename(app_dirpath)\n return app_name", "def name(self):\n return self.application_tree['name']", "def get_application_name(self, feed_id):\r\n return self._handler.get_application_name(feed_id)", "def name(self):\r\n if self._name is not None:\r\n return self._name\r\n else:\r\n try:\r\n return Inspection.find_application_name()\r\n # TODO(wickman) Be more specific\r\n except Exception:\r\n return 'unknown'", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")", "def application_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"application_name\")", "def _app(self) -> str:\n return self.charm.app.name", "def module_name(self) -> str | None:\n try:\n return self._app_name.replace(\"-\", \"_\")\n except AttributeError:\n # If the app was created from an interactive prompt,\n # there won't be a module name.\n return None", "def app(self) -> str:\n return pulumi.get(self, \"app\")", "def get_app_name(i):\n return app_id + '-' + str(i)", "def app_name(self):\n return self._chromecast.app_display_name if self._chromecast else None", "def fallback_application_name() -> str:\n # Import here instead of at the top to avoid an ImportError caused by an\n # import cycle. This can be removed once the import graph of id3c.cli is\n # less tangled.\n from ..cli.utils import running_command_name\n\n # \"The application_name can be any string of less than NAMEDATALEN\n # characters (64 characters in a standard build).\"¹\n #\n # psycopg2 / libpq will truncate for us, but they will issue a NOTICE log\n # message if they do. 
Avoid the cluttery notice by truncating ourselves.\n #\n # ¹ https://www.postgresql.org/docs/current/runtime-config-logging.html#GUC-APPLICATION-NAME\n max_len = 64\n appname = running_command_name()\n\n return shorten(appname, max_len, \"...\")", "def current_app(self) -> str:\n app_id = self.app.get_current() # Returns the application ID (string) of the\n foreground_app = [x for x in self.app.list_apps() if app_id == x[\"id\"]][0]\n return foreground_app['title']", "def get_app_label(app_module):\n return app_module.__name__.split('.')[-1]" ]
[ "0.9113221", "0.88275534", "0.8791967", "0.87901825", "0.8730592", "0.8585392", "0.85836774", "0.84330225", "0.83433735", "0.83433735", "0.8274585", "0.81731343", "0.8169378", "0.8116642", "0.80174756", "0.7825664", "0.7809752", "0.7760691", "0.7760691", "0.7760691", "0.7760691", "0.7739239", "0.7708164", "0.7652033", "0.7561729", "0.75582975", "0.7517115", "0.738871", "0.73668987", "0.7339383" ]
0.88942343
1
Get the contact's first name.
def _get_contact_first_name(app): name = app.get(CONTACT_NAME_KEY) if name: return ' {}'.format(name.split(' ')[0])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_first_name(self):\n return self._first_name", "def get_first_name(self) -> str:\n return self.first_name", "def first_name(self):\n return self._first_name", "def first_name(self):\n return self._first_name", "def first_name(self):\n return self._first_name", "def first_name(self) -> str:\n return self._first_name", "def first_name(self):\n\n return self._first_name", "def firstname(self):\n return self._firstname", "def firstname(self):\n return self._firstname", "def first_name(self, instance):\r\n return instance.user.first_name", "def getFirstName(self):\n\t\treturn self.FirstName", "def first_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"first_name\")", "def first_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"first_name\")", "def FirstName(self, reg_first_name = VALUE_NOT_SET):\n if reg_first_name != VALUE_NOT_SET:\n self.__reg_first_name = reg_first_name\n try:\n self.__contact.AdditionalInfo().FirstName(self.__reg_first_name)\n except:\n pass\n else:\n if not self.__reg_first_name:\n self.__reg_first_name = None\n return self.__reg_first_name", "def get_user_firstname():\n if not is_authenticated() or 'samlUserdata' not in session:\n return None\n\n first_name = session.get('samlUserdata', {}).get(SAML_ATTRIBUTES.get('first_name', None), False)\n\n return first_name[0] if first_name else not_found('first_name')\n return None", "def get_first_name(self):\n element = self.driver.find_element(*self.firstname_textbox_selector)\n return element.get_attribute(\"value\")", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name", "def get_short_name(self):\n return self.first_name" ]
[ "0.86260074", "0.8617128", "0.8315871", "0.8315871", "0.8315871", "0.8312541", "0.81962097", "0.8183392", "0.8183392", "0.80284345", "0.7993891", "0.7896602", "0.7896602", "0.78449786", "0.7739931", "0.77090037", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754", "0.7706754" ]
0.8650525
0
Get the email template name for the first contact email.
def _get_first_contact_email_template_name(app): return app[FIRST_CONTACT_EMAIL_TEMPLATE_NAME_KEY]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_template_name(self):\n template = None\n if self.template:\n template = self.template\n if not template:\n for p in self.get_ancestors(ascending=True):\n if p.template:\n template = p.template\n break\n if not template:\n template = settings.CMS_TEMPLATES[0][0]\n for t in settings.CMS_TEMPLATES:\n if t[0] == template:\n return t[1] \n return _(\"default\")", "def _get_contact_first_name(app):\n name = app.get(CONTACT_NAME_KEY)\n if name:\n return ' {}'.format(name.split(' ')[0])", "def template_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"template_name\")", "def template(self):\n template_names = self.get_template_names()\n if template_names:\n return template_names[0]\n return None", "def template_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"template_name\")", "def get_template(self, template):\n\n template_path = aj.config.data['email']['templates'].get(template, 'default')\n\n if template_path == 'default' or not os.path.isfile(template_path):\n template_path = DEFAULT_TEMPLATES[template]\n\n return template_path", "def contact_name(self) -> str:\n return pulumi.get(self, \"contact_name\")", "def template_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"template_name\")", "def get_name_from_email(email):\r\n individual_name = email.split('@')[0]\r\n parts = individual_name.split('.')\r\n name = \" \".join(parts).title()\r\n return name", "def get_email_template_id(self):\n return self.email_template_id", "def get_full_name(self):\n\t\treturn self.email", "def get_name_from_email(email: str) -> str:\n before_at_symbol = email.split(\"@\")[0]\n name_parts = before_at_symbol.split(\".\")\n name = \" \".join(name_parts).title()\n return name", "def _get_template_fname(self):\n template_fname = self._context.get('template_fname', False)\n return template_fname", "def get_template():\r\n try:\r\n return CourseEmailTemplate.objects.get()\r\n except CourseEmailTemplate.DoesNotExist:\r\n log.exception(\"Attempting to fetch a non-existent course email template\")\r\n raise", "def find_template_name(self, regex, template_env=None):\n # Select template_env\n if not template_env:\n template_env = self._template_env\n\n # Find templates matching the regex\n template_list = template_env.list_templates(\n filter_func=lambda template_name: re.match(regex, template_name))\n\n # Select the first match\n if template_list:\n return template_list[0]\n else:\n return ''", "def displayname(self):\n return self.email", "def template_name(self, template_type: Union[TemplateType, str]) -> str:\n return self.options.get(\"templates\", {}).get(template_type, template_type)", "def contact_email(self) -> str:\n return pulumi.get(self, \"contact_email\")", "def get_short_name(self):\n\t\treturn self.email", "def launch_template_name(self) -> Optional[str]:\n return pulumi.get(self, \"launch_template_name\")", "def launch_template_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"launch_template_name\")", "def getNameTemplate(self):\n\n return self.nameTemplate", "def launch_template_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"launch_template_name\")", "def _get_cfn_template_file_name(self, cfn_template_path: str) -> str:\n base_name = os.path.basename(cfn_template_path)\n (file_name, ext) = os.path.splitext(base_name)\n return file_name", "def inspect_template_name(self) -> str:\n return pulumi.get(self, \"inspect_template_name\")", "def get_short_name(self):\n\n return self.email", "def 
template_name(self):\n\t\traise NotImplementedError('template_name must be defined')", "def get_template_name(self):\n if self.template_name:\n return '%s' % self.template_name\n\n if self.template_name_prefix:\n return '%s%s.html' % (self.template_name_prefix, self.mode)\n\n for piece_name in reversed(list(self.pieces.keys())):\n piece = getattr(self, piece_name)\n result = piece.get_template_name()\n if result:\n return '%s.html' % result\n\n return None", "def get_first_name(self) -> str:\n return self.first_name", "def _get_contact_email(app):\n return app[CONTACT_EMAIL_KEY]" ]
[ "0.68836665", "0.6812669", "0.66186845", "0.66120255", "0.6575655", "0.6516548", "0.64882386", "0.64112824", "0.63238996", "0.6301571", "0.6288311", "0.6213783", "0.6116907", "0.60900533", "0.6080471", "0.6079054", "0.60654145", "0.6055386", "0.60473263", "0.60457283", "0.60357887", "0.6032748", "0.60245496", "0.5977315", "0.5976874", "0.5962834", "0.5917714", "0.5909153", "0.5900827", "0.5865399" ]
0.87440306
0
Gets the tote store url for this app.
def _get_app_tote_store_url(app): return app[APP_TOTE_STORE_URL]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getNoteStoreUrl(self, authenticationToken):\r\n pass", "def getNoteStoreUrl(self, authenticationToken):\r\n self.send_getNoteStoreUrl(authenticationToken)\r\n return self.recv_getNoteStoreUrl()", "def get_store_path(cls):\n user_data_dir = cls.user_data_dir()\n store_path = os.path.join(user_data_dir, 'store.json')\n return store_path", "def store_path(self):\n return path.join(env.store_home, self._store_path)", "def get_url(self):\n return self.db_url", "def url(self):\n return self.storage.url(self.name)", "def helper_get_alt_task_store_name(self):\n return self.helper_retrieve_last_request_get_dict_key_val_index_zero_or_return_none(\"alt_task_store_name\")", "def get_uri(self):\n return self.url", "def log_store(self) -> str:\n return pulumi.get(self, \"log_store\")", "def get_store(self, store_name: str) -> Any:\n pass", "def _get_store(self):\n return self._store", "def get_url(self):\n\n return self.url", "def get_url(self):\n\n return self.url", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def url(self) -> str:\n return pulumi.get(self, \"url\")", "def getRootURL(self):\n return self.appRootURL", "def get_url(self):\n return self._url", "def get_url(self):\n return self.url", "def get_url(self):\n return self.url", "def store_endpoint(self):\n # Kind of cache for logging purposes (avoids repeated calls)\n self._store_endpoint = self.keystone_client.ceilometer_uri\n return self._store_endpoint", "def application_url(self) -> Optional[str]:\n return pulumi.get(self, \"application_url\")", "def tracking_url(self) -> str:\n return pulumi.get(self, \"tracking_url\")", "def log_store(self) -> Optional[str]:\n return pulumi.get(self, \"log_store\")", "def get_url(self):\n return self.base_driver.current_url", "def get_track_url(self) -> Optional[str]:\n return self.track_url", "def get_store(store_name: str):\n return store_handler.get_store(store_name)", "def get_store(hass: HomeAssistant) -> dict[str, Any] | None:\n return hass.data.get(DATA_STORE)", "def geturl(self):\n return self.__url", "def url(self) -> str:\n return self._url", "def url(self) -> str:\n return self._url" ]
[ "0.6696655", "0.64120066", "0.6146364", "0.5828228", "0.57313335", "0.5705295", "0.56562585", "0.5626252", "0.5613728", "0.5612579", "0.5582851", "0.555923", "0.555923", "0.5541939", "0.5541939", "0.5510885", "0.55108297", "0.5492612", "0.5492612", "0.5468174", "0.54611695", "0.54573894", "0.544008", "0.54178166", "0.54149383", "0.54030055", "0.5387014", "0.53735423", "0.5370176", "0.5370176" ]
0.84141535
0
Check if we already sent the first contact email.
def _did_send_first_contact_email(app): first_contact = app[FIRST_CONTACT_EMAIL_SENT_KEY] if first_contact and first_contact.lower() == 'y': return True return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def IsfirstAddContact(self):\n if search_text(contact.get_value('accounts'), isScrollable = 0, searchFlag = TEXT_CONTAINS):\n click_in_list_by_index(0)\n return True\n else:\n return False", "def recent_email_sent(self):\n recent_contact_activity = self.activity().filter(verb='Contacted complainant:', description__contains='Email sent').first()\n if recent_contact_activity:\n try:\n email = recent_contact_activity.description.split(\"'\")[1]\n except IndexError:\n email = None\n return email\n return None", "def is_replied_to(thread):\r\n messages = thread['messages']\r\n if len(messages) < 2:\r\n return False\r\n user_email = get_sender_email(messages[0])\r\n for i in range(1, len(messages)):\r\n sender_email = get_sender_email(messages[i])\r\n if user_email != sender_email:\r\n return True\r\n return False", "def test_skip_blank_emails(self):\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n\n blank_contact = self.create_contact(data={'email': ''})\n self.group.contacts.add(blank_contact)\n\n # run email job\n from aremind.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertEqual(len(message.to), 1)", "def has_validated_email(self):\n return self.receipt_diploma_uploaded_at is not None", "def test_previously_sent_message_not_sent_twice(self):\n thread = self.create_thread()\n message = thread.first_message\n message.sent = True\n message.save()\n\n send_message(message.pk)\n\n self.assertFalse(self.groupnotify_mock.called)", "def testMailSent(self):\n self.sendEmail()\n messages = self.mail_stub.get_sent_messages(to='[email protected]')\n self.assertEqual(1, len(messages))\n self.assertEqual('[email protected]', messages[0].to)", "def check_mail(self, update=False):\r\n return self.check_mail_dir(update=update)", "def test_send_subscribe_email(self):\n #Verifica se foi enviado 1 e-mail, o este não envia e-mail\n self.assertEqual(1, len(mail.outbox))", "def has_sender(self):\n return self.balance > 0", "def isEmailUsed(self, email):\n\n\t\ttestq = {\"email\": email};\n\t\ttest_result = self.db.request(\"getOne\", testq);\n\n\t\tif test_result:\n\t\t\treturn True;\n\t\telse:\n\t\t\treturn False;", "def test_skip_blank_emails(self):\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n reminders.Patient.objects.filter(\n pk__in=[self.test_patient.pk, self.other_patient.pk]\n ).update(next_visit=appt_date)\n confirmed = self.create_confirmed_notification(self.test_patient, appt_date)\n blank_contact = self.create_contact(data={'email': ''})\n null_contact = self.create_contact(data={'email': None})\n self.group.contacts.add(blank_contact)\n self.group.contacts.add(null_contact)\n\n self.startRouter()\n self.router.logger.setLevel(logging.DEBUG)\n # run email job\n from afrims.apps.reminders.app import daily_email_callback\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertEqual(len(message.to), 1)\n self.stopRouter()", "def check_notify(self):\n # no stage or no notify\n if not self.stage_id or not self.stage_id.notify:\n return False\n # mail already sent and don't send multiple times\n if self.stage_id in self.notified_stage_ids:\n if not self.stage_id.notify_multiple:\n return False\n # no mail template\n if not self.stage_id.notify_template_id:\n raise 
except_orm(\n _(u'Warning !'),\n _(u\"No email template selected \"\n u\"in the '%s' stage of the '%s' method\"\n ) % (self.stage_id.name, self.method_id.name))\n return True", "def testEmailAlreadyThere(self):\r\n res = self.app.post(\r\n '/signup_process',\r\n params={\r\n 'email': '[email protected]'\r\n }\r\n )\r\n self.assertIn('already signed up', res.body)", "def only_once(self) -> bool:\n return self.times == 1", "def is_once(today, last_send):\n if isinstance(today, datetime):\n if last_send is not None:\n if today.date() != last_send.date():\n return True\n return False\n return True\n else:\n raise Exception(\"{} is not a datetime instance\".format(today))", "def is_empty(self):\n if len(self.messages) < 1:\n return True\n else:\n return False", "def has_receipt_address(self):\n return self.receipt_address_uploaded_at is not None", "def has_validated_email(self):\n return self.user.email_user is not None", "def include_contact(self, contact_num: int):\n if self._unique_contacts is not None:\n return contact_num in self._unique_contacts\n else:\n return True", "def unfilled_contact(entry: ContactEntry) -> bool:\n if entry.email is not None:\n if len(entry.email) >= 1:\n if entry.email[0].address is not None:\n return False\n if entry.name is not None:\n if entry.name.given_name is not None:\n return False\n if entry.name.family_name is not None:\n return False\n if entry.organization is not None:\n if entry.organization.name is not None:\n if entry.organization.name.text is not None:\n return False\n if entry.organization.department is not None:\n if entry.organization.department.text is not None:\n return False\n return True", "def checkEmail():\n\tpop_conn = poplib.POP3_SSL('pop.gmail.com')\n\tpop_conn.user('')\n\tpop_conn.pass_('')\n\t#Get messages from server:\n\tmessages = [pop_conn.retr(i) for i in range(1, len(pop_conn.list()[1]) + 1)]\n\t# Concat message pieces:\n\tmessages = [\"\\n\".join(mssg[1]) for mssg in messages]\n\t#Parse message intom an email object:\n\tmessages = [parser.Parser().parsestr(mssg) for mssg in messages]\n\tflag = 0\n\tsweep = None\n\tfor message in messages:\n\t\tsubject = message['subject']\n\t\tif subject is None:\n\t\t\tcontinue\n\t\telif \"CommenceSweep:\" in subject:\n\t\t\tstart = subject.find(\":\")\n\t\t\tcommand = subject[start+1:]\n\t\t\tprint command\n\t\t\tif \"Comp\"+sys.argv[1] in command:\n\t\t\t\tstart = command.find(\"-\")\n\t\t\t\tsweep = command[start+1:]\n\t\t\t\tprint sweep\n\t\t\t\tpoplist = pop_conn.list()\n\t\t\t\tmsglist = poplist[1]\n\t\t\t\tfor msgspec in msglist:\n\t\t\t\t\tdelete = int(msgspec.split(' ')[0])\n\t\t\t\t\tpop_conn.dele(delete)\n\t\t\t\tflag = 1\n\tpop_conn.quit()\n\treturn flag, sweep", "def isSetEmail(self):\n return _libsbml.ModelCreator_isSetEmail(self)", "def check_duplicate_email(self, email):\r\n request = self.req_factory.post('unused_url', data={\r\n 'new_email': email,\r\n 'password': 'test',\r\n })\r\n request.user = self.user\r\n self.assertFailedRequest(self.run_request(request), 'An account with this e-mail already exists.')", "def action_my_payslip_sent(self):\n self.ensure_one()\n template = self.env.ref('payroll_email.email_template_for_my_payroll')\n if template:\n self.env['mail.template'].browse(template.id).send_mail(self.id,force_send=True)\n self.flag = True", "def get_receive_mail_str(self):\n ret = False\n if self.__mail:\n ret = True\n return ret", "def test_sending_mail(self):\n\n appt_date = datetime.date.today() + datetime.timedelta(days=7) # Default for email\n confirmed = 
self.create_confirmed_notification(self.test_patient, appt_date)\n\n # run email job\n daily_email_callback(self.router)\n\n self.assertEqual(len(mail.outbox), 1)\n message = mail.outbox[0]\n self.assertTrue(self.test_contact.email in message.to)", "def check_for_duplicate_subject_identifier(self):\n pass", "def is_first_synced(self):\n return True", "def check_duplicate_message(\n cls,\n recipient_id: str,\n email_subject: str,\n email_body: str\n ) -> bool:\n\n email_hash = cls._generate_hash(\n recipient_id, email_subject, email_body)\n\n datetime_now = datetime.datetime.utcnow()\n time_interval = datetime.timedelta(\n minutes=feconf.DUPLICATE_EMAIL_INTERVAL_MINS)\n\n sent_datetime_lower_bound = datetime_now - time_interval\n\n messages = cls.get_by_hash(\n email_hash, sent_datetime_lower_bound=sent_datetime_lower_bound)\n\n for message in messages:\n if (message.recipient_id == recipient_id and\n message.subject == email_subject and\n message.html_body == email_body):\n return True\n\n return False" ]
[ "0.6324304", "0.6179195", "0.610429", "0.59007627", "0.58576906", "0.5768399", "0.5731466", "0.57095504", "0.5704638", "0.56970215", "0.5689795", "0.566889", "0.56208533", "0.5612742", "0.5604829", "0.5577359", "0.55763453", "0.55086017", "0.55008966", "0.5475762", "0.5462582", "0.5454686", "0.5432835", "0.5419143", "0.541212", "0.53917944", "0.53865623", "0.5386411", "0.53730184", "0.5365943" ]
0.8393486
0
Sends out emails to the apps in the provided csv.
def send(app_csv='apps.csv', verbose=True, dry_run=True): results = [] app_info = _csv_to_dict(app_csv) for app in app_info: # Get all the app info needed for this request. app_name = _get_app_name(app) contact_first_name = _get_contact_first_name(app) email_address = _get_contact_email(app) app_tote_store_url = _get_app_tote_store_url(app) subject = _get_email_subject(app_name) # If we already sent the first contact email, continue. if _did_send_first_contact_email(app): result = dict( app_name=app_name, contact_first_name=contact_first_name, email_address=email_address, app_tote_store_url=app_tote_store_url, subject=subject, status='skipped', error=None, ) logger.info(result) results.append(result) continue try: # Get the appropriate template to send. email_template = _get_first_contact_email_template_name(app) template = env.get_template(email_template) # Render the template with app info. content = template.render( app_name=app_name, contact_first_name=contact_first_name, app_tote_store_url=app_tote_store_url, ) send_email(to=email_address, subject=subject, html=content, dry_run=dry_run) result = dict( app_name=app_name, contact_first_name=contact_first_name, email_address=email_address, app_tote_store_url=app_tote_store_url, subject=subject, status='success', error=None, ) except Exception as e: result = dict( app_name=app_name, contact_first_name=contact_first_name, email_address=email_address, app_tote_store_url=app_tote_store_url, subject=subject, status='failure', error=str(e), ) logger.info(result) results.append(result) # Sleep momentarily to avoid dos'ing the server. if not dry_run: time.sleep(0.1) if verbose: _print_summary(results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generate_email_csv(csv_input):\n # Get a pandas dataframe column with all of the relevant duns numbers\n\n df = pd.read_csv(csv_input)\n duns_numbers = df.dunsnumber.tolist()\n\n # Gets the file number for the current file by taking the max of all of the other numbers in the lists directory and adding one to the hightest number\n\n non_decimal = re.compile(r'[^\\d]+')\n file_number_list = [int(non_decimal.sub('', file)) for file in listdir('mail/lists')]\n file_number = max(file_number_list)+1 if file_number_list else 1\n\n file_name = 'mail/lists/email_{0}.csv'.format(file_number)\n\n # Actually get the emails\n\n sam_qs = SamRecord.objects.all().filter(duns__in=duns_numbers)[:100]\n\n results = set([])\n\n pudb.set_trace()\n\n for sam in sam_qs:\n email = sam.email_address\n if email:\n results.add(email)\n\n with open(file_name, 'w') as f:\n for email in results:\n f.write(email+\"\\n\")", "def exec(self): \r\n emails = self.args[0].split(',')\r\n for email in emails:\r\n send_mail(self.args[1], self.args[2], email)\r\n return_text = \"Sent Mail To :: \" + self.args[0] +\"\\n\" + self.args[1] + \":\\n\" + self.args[2]\r\n return return_text", "def main(arguments, emailer):\n emailer.read_config()\n print(\"Config read.\")\n emailer.setup_config(pages=arguments.pages,\n email_list=arguments.email_list,\n items_range=arguments.range,\n config=arguments.config,\n database=arguments.database,\n file=arguments.file,\n email_address=arguments.email_address,\n email_password=arguments.email_password,\n send_time=arguments.time,\n frequency=arguments.frequency)\n emailer.write_config()\n \n emailer.setup_database()\n if emailer.pull_items_search() != 'bot':\n print(\"Items retrieved\")\n else:\n return\n \n emailer.items_to_xls()\n print(\"xls file created.\")\n emailer.items_to_csv()\n print(\"csv file created\")\n\n print(\"Sending emails.\")\n emailer.send_email()", "def send_emails(self):\n\n with open(self.emails_file) as fp:\n emails = fp.readlines()\n logging.debug('%s e-mail addresses are loaded from %s' % (len(emails), self.emails_file))\n\n emails = map(lambda email: email.strip(), emails)\n\n for i, email in enumerate(emails):\n try:\n self.send_email(email)\n except Exception as e:\n logging.exception('Can\\'t send e-mail to %s (number %s)!' 
% (email, i))\n else:\n logging.debug('E-mail was sent to %s (number %s)' % (email, i))\n\n sleep_time = self.timeout * (0.5 + random.random())\n time.sleep(sleep_time) # timeout\n\n logging.debug('Done!')", "def send_email(settings, excel):\n Email._set_email(settings, excel)\n Email._send_email_helper(settings, excel)", "def emailJobs(\n df, \n retainedCompany, \n senderName, \n defaultSenderEmail, \n emailPassword, \n senderTitle, \n senderCompany, \n senderCompanyHomePage, \n senderPhone, \n noContactCompanyListPickleFileName, \n port=465, \n returnHTML=True\n ):\n try:\n with open(noContactCompanyListPickleFileName, 'rb') as inputFile:\n noContactCompanyList = pickle.load(inputFile) \n except:\n noContactCompanyList = []\n\n for i in range(len(df)):\n companyName = df['Organization Name'][i]\n if companyName.lower() in noContactCompanyList:\n pass\n try:\n domainName = df['Domain'][i]\n jobsEmails = [prefix + '@' + domainName for prefix in ['jobs', 'careers']]\n # email all the jobs pages for that copmany\n sendEmails( \n 'guys', # addressing general company, so use 'guys' instead of individual name\n retainedCompany,\n companyName,\n jobsEmails,\n senderName,\n defaultSenderEmail,\n emailPassword,\n senderTitle,\n senderCompany,\n senderCompanyHomePage,\n senderPhone,\n port=port,\n returnHTML = returnHTML \n ) \n except:\n pass", "def write_emails_to_file(result_emails, category):\r\n\tf = open('emails.csv', 'wb')\r\n\tcsvWriter = csv.writer(f, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\r\n\tfor email in result_emails:\r\n\t\tcsvWriter.writerow([email, category])\t\r\n\tf.close()", "def _auto_email_send(self):\n records = self.search([('send_by', '=', 'mail')])\n\n for supplier in records:\n send_at = datetime.combine(fields.Date.today(),\n float_to_time(supplier.automatic_email_time, supplier.moment, supplier.tz)).astimezone(pytz.UTC).replace(tzinfo=None)\n if supplier.available_today and fields.Datetime.now() > send_at:\n lines = self.env['lunch.order'].search([('supplier_id', '=', supplier.id),\n ('state', '=', 'ordered'), ('date', '=', fields.Date.today())])\n\n if lines:\n order = {\n 'company_name': lines[0].company_id.name,\n 'currency_id': lines[0].currency_id.id,\n 'supplier_id': supplier.partner_id.id,\n 'supplier_name': supplier.name,\n 'email_from': supplier.responsible_id.email_formatted,\n }\n\n _lines = [{\n 'product': line.product_id.name,\n 'note': line.note,\n 'quantity': line.quantity,\n 'price': line.price,\n 'toppings': line.display_toppings,\n 'username': line.user_id.name,\n } for line in lines]\n\n order['amount_total'] = sum(line.price for line in lines)\n\n self.env.ref('lunch.lunch_order_mail_supplier').with_context(order=order, lines=_lines).send_mail(supplier.id)\n\n lines.action_confirm()", "def _send_bulk_mail(\n recipient_ids, sender_id, intent, email_subject, email_html_body,\n sender_email, sender_name, instance_id=None):\n _require_sender_id_is_valid(intent, sender_id)\n\n recipients_settings = user_services.get_users_settings(recipient_ids)\n recipient_emails = [user.email for user in recipients_settings]\n\n cleaned_html_body = html_cleaner.clean(email_html_body)\n if cleaned_html_body != email_html_body:\n log_new_error(\n 'Original email HTML body does not match cleaned HTML body:\\n'\n 'Original:\\n%s\\n\\nCleaned:\\n%s\\n' %\n (email_html_body, cleaned_html_body))\n return\n\n raw_plaintext_body = cleaned_html_body.replace('<br/>', '\\n').replace(\n '<br>', '\\n').replace('<li>', '<li>- ').replace('</p><p>', '</p>\\n<p>')\n 
cleaned_plaintext_body = html_cleaner.strip_html_tags(raw_plaintext_body)\n\n def _send_bulk_mail_in_transaction(instance_id=None):\n \"\"\"Sends the emails in bulk to the recipients.\"\"\"\n sender_name_email = '%s <%s>' % (sender_name, sender_email)\n\n email_services.send_bulk_mail(\n sender_name_email, recipient_emails, email_subject,\n cleaned_plaintext_body, cleaned_html_body)\n\n if instance_id is None:\n instance_id = email_models.BulkEmailModel.get_new_id('')\n email_models.BulkEmailModel.create(\n instance_id, recipient_ids, sender_id, sender_name_email, intent,\n email_subject, cleaned_html_body, datetime.datetime.utcnow())\n\n transaction_services.run_in_transaction(\n _send_bulk_mail_in_transaction, instance_id)", "def execute(self):\n return LOGGER.info(f\"{datetime.datetime.now()} - Sending EMail to the configured email list\")", "def write_to_csv(list_of_emails):\n import csv\n # use newline='' to prevent double-spaced rows\n with open('emails.csv', 'w', newline='') as outFile:\n outWriter = csv.writer(outFile)\n charNum = outWriter.writerow(['email'])\n for i in list_of_emails:\n charNum = outWriter.writerow([i])\n outFile.close()", "def send_email_users():\n\n # Get users emails\n users_emails = User.objects.exclude(\n Q(email='') |\n Q(email=None)\n ).values_list(\n 'email',\n flat=True\n )\n\n # Send email to each user\n # for email_user in users_emails:\n\n title = 'Se han calculado nuevos Hard Flag'\n msg = 'Actualmente se han agregado nuevos hard flag '\n msg += ' a la base de datos'\n\n email = EmailMessage(\n title,\n msg,\n to=users_emails\n )\n email.send()", "def send_bulk_course_email(entry_id, _xmodule_instance_args):\r\n # Translators: This is a past-tense verb that is inserted into task progress messages as {action}.\r\n action_name = ugettext_noop('emailed')\r\n visit_fcn = perform_delegate_email_batches\r\n return run_main_task(entry_id, visit_fcn, action_name)", "def send_emails():\n\n cmd = \"sendmail -f [email protected]\"\n for msg in EMAIL_MESSAGES:\n for rec in RECIPIENTS:\n call(\"echo '%s' | %s %s\" % (msg, cmd, rec), None, True)", "def sendMail(listEmailsToSend, title, data):\n if isinstance(listEmailsToSend, str):\n listEmailsToSend = [listEmailsToSend]\n send_mail(\n f'{title}',\n f'{data}',\n settings.EMAIL_HOST_USER,\n listEmailsToSend,\n fail_silently=False,\n )", "def send_email_to_admins(self, template_name, subject, **kw):\n \n mailer = self.app.module_map['mail']\n barcamp = self.barcamp\n new_user = self.user # active user\n for admin in self.barcamp.admin_users:\n print admin\n send_tos = [admin.email]\n kwargs = dict(\n new_user = new_user,\n user = admin,\n barcamp = barcamp,\n url = self.handler.url_for(\"barcamps.index\", slug = self.barcamp.slug, _full = True),\n notification_url = self.handler.url_for(\"barcamps.edit\", slug = self.barcamp.slug, _full = True)\n )\n kwargs.update(kw)\n payload = self.handler.render_lang(\"emails/%s.txt\" %template_name, **kwargs)\n mailer.mail(admin.email, subject, payload)", "def send_mail(month: str, data: list):\n\n V2RayLogger.debug('SMTP server: {0}:{1}.'.format(Config.get('mail_host'), Config.get('mail_port')))\n smtp = smtplib.SMTP_SSL(Config.get('mail_host'), Config.get('mail_port'))\n V2RayLogger.debug('SMTP login with: {0}:{1}.'.format(Config.get('mail_user'), Config.get('mail_pass')))\n smtp.login(Config.get('mail_user'), Config.get('mail_pass'))\n V2RayLogger.debug('SMTP login successful.')\n\n for row in data:\n V2RayLogger.debug('Send email: {0}:{1}.'.format(row[0], row[1]))\n message 
= '<tr align=left><th align=\"left\">{0:30s}</th><th align=\"left\">{1:9s}</th></tr>\\n'.format(\n row[0], row[1])\n message = MIMEText(message, 'html')\n message['Subject'] = Header(Config.get('mail_subject') + ': {0}'.format(month))\n message['From'] = Config.get('mail_user')\n message['To'] = row[0]\n\n smtp.sendmail(Config.get('mail_user'), row[0], message.as_string())\n V2RayLogger.info('Send traffic to: {0}.'.format(row[0]))", "def send_test_email_for_bulk_emails(tester_id, email_subject, email_body):\n tester_name = user_services.get_username(tester_id)\n tester_email = user_services.get_email_from_user_id(tester_id)\n _send_email(\n tester_id, tester_id, feconf.BULK_EMAIL_INTENT_TEST,\n email_subject, email_body, tester_email, sender_name=tester_name)", "def main():\n credentials = get_credentials()\n http = credentials.authorize(httplib2.Http())\n service = discovery.build('gmail', 'v1', http=http)\n\n messageIds = []\n i = 0\n nextPageToken = None\n while (i <= 15):\n try:\n response = service.users().messages().list(userId='me', q='after:2016/09/01', maxResults=10000, pageToken=nextPageToken).execute()\n messages = response.get('messages')\n nextPageToken = response['nextPageToken']\n\n for m in messages:\n messageIds.append(m['id'])\n\n i+=1 \n except KeyError:\n break\n\n senders = []\n counter = 0\n for i in messageIds:\n data = service.users().messages().get(userId='me', id=i).execute()\n for d in data['payload']['headers']:\n if d['name'] == 'Received':\n print(d['value'][d['value'].find('; ')+1:d['value'].find('(PST)')])\n if d['name'] == 'From' and 'bounce' not in d['value']:\n senders.append(d['value'])\n print(counter, ' ', d['value'])\n counter += 1\n break\n\n emails = []\n with open('out.csv', 'wb') as f:\n writer = csv.writer(f, delimiter=',')\n for person in set(senders):\n cleaned = clean_data(person)\n name = cleaned[0]\n email = cleaned[1]\n if email not in emails:\n emails.append(email)\n if name != None and email != None:\n writer.writerow([name, email])", "def readInCSV(csvFile):\n\tprint \"Checking if helper app is installed...\"\n\tandroidCheckAndInstallHelper()\n\ttry:\n\t\tprint \"Will read in the files from %s\" % csvFile\n\t\tstatus = subprocess.call([\"adb\",\"shell\",\"am\",\"startservice\",\n\t\t\t\t\t\t\t\t \"-a\", \"com.synchronoss.androidDev.contactcreaterapp.action.IMPORT\",\n\t\t\t\t\t\t\t\t \"-e\", \"CSV\", csvFile,\n\t\t\t\t\t\t\t\t \"com.synchronoss.androidDev.contactcreaterapp/.CreateAndAddContacts\"],\n\t\t\t\t\t\t\t\t stdout=stdout,stderr=stderr)\n\t\tif (status == 1):\n\t\t\tprint \"Contacts successfully copied from csv on target device.\"\n\t\tif (status != 0):\n\t\t\tprint >>sys.stderr, \"Unable to launch contact adder app\"\n\t\t\tsys.exit()\n\texcept OSError as e:\n\t\tprint >>sys.stderr, \"Execution failed: \", e\n\t\tsys.exit()\n\twaitForHelperApp()", "def handle(self, *args, **options):\n\n candidates_with_email = [candidate for candidate in Candidate.objects.all()\n if candidate.contact_address and candidate.participating]\n\n\n print 'sending e-mails'\n conn = get_connection()\n for c in candidates_with_email:\n if c.should_send_reminder():\n\n print 'emailing', c\n # store timestamp for reminder email so that they don't get another one for <REMINDER_TIME_PERIOD> days\n c.last_reminder_sent = timezone.now()\n c.save()\n msg = make_email(c)\n conn.send_messages([msg])\n conn.close()", "def email_import(ctx, user_csv, group_size, group_name, section_name):\n usersChunked = dict()\n\n config_options = 
lazyTools.TOMLConfigCTXImport(ctx)\n\n debug = lazyTools.parentSetting(ctx, \"debug\")\n verbose = lazyTools.parentSetting(ctx, \"verbose\")\n\n if section_name.lower() in config_options[\"gophish\"]:\n\n # Debug print statement to check if the section name was properly found\n if debug:\n click.secho(\"[*] Section name found in config file.\", fg=\"green\")\n\n # Check if we need to be on the VPN\n if config_options[\"gophish\"][section_name.lower()][\"VPN_Required\"]:\n # Skip VPN check if debug is True\n if debug:\n click.secho(\"[*] Skipping VPN check \")\n else:\n if lazyTools.ConnectedToVPN(ctx.parent.parent.params[\"config_path\"]):\n # Connected to VPN\n if debug:\n click.secho(\"[*] Connected to VPN\", fg=\"green\")\n else:\n raise click.Abort(\n \"The VPN does not appear to be connected. Try again after connecting to the VPN. \"\n )\n\n # Connect to GoPhish server\n if debug:\n click.echo(\n \"[*] Using hostname: https://{hostname}:{port}\".format(\n hostname=config_options[\"gophish\"][section_name.lower()][\n \"Hostname\"\n ],\n port=config_options[\"gophish\"][section_name.lower()][\"Port\"],\n )\n )\n if config_options[\"gophish\"][section_name.lower()][\"Verify_SSL\"]:\n click.echo(\"[*] SSL connections will be verified.\")\n else:\n click.secho(\"[*] SSL connections will not be verified.\", bold=True)\n\n api = Gophish(\n config_options[\"gophish\"][section_name.lower()][\"api_key\"],\n host=\"https://{hostname}:{port}\".format(\n hostname=config_options[\"gophish\"][section_name.lower()][\"Hostname\"],\n port=config_options[\"gophish\"][section_name.lower()][\"Port\"],\n ),\n verify=config_options[\"gophish\"][section_name.lower()][\"Verify_SSL\"],\n )\n\n # Try to get list of existing groups\n try:\n groups = api.groups.get()\n except requests.exceptions.ConnectionError as e:\n click.secho(\n \"Connection to the GoPhish server failed because {e}. Check the host and try again.\".format(\n e=e\n ),\n fg=\"red\",\n )\n raise click.Abort()\n\n # Check if something went wrong. Error parsing on the part of GoPhish library needs some love.\n if isinstance(groups, Error):\n click.secho(\n \"[!] {message}. Remediate the issue and try again.\".format(\n message=groups.message\n ),\n fg=\"red\",\n bold=True,\n )\n raise click.Abort()\n\n # groups isn't an Error object, so we *should* be good to go.\n if debug:\n click.secho(\"A list of groups was successfully acquired.\", fg=\"green\")\n\n # List all users in existing groups\n for group in groups:\n # print(group.targets)\n for user in group.targets:\n pass # print(vars(user))\n # printUsersInGroup(group)\n\n # Read the CSV file with the users in it.\n with open(user_csv, \"r\", encoding=\"utf-8\") as user_csv_file:\n # dialect = csv.Sniffer().sniff(user_csv_file.read(1024))\n # print(vars(dialect))\n userReader = csv.DictReader(user_csv_file, delimiter=\",\")\n rowList = list()\n for row in userReader:\n rowList.append(row)\n\n # click.echo(tabulate(rowList, headers='keys', tablefmt=\"grid\"))\n\n # Divide the list of users into groups by group name\n # Template: <First>_<Second>_<Number\n # i.e. 
Phishing_Campaign_Remote_4\n\n group_name = group_name.replace(\" \", \"_\")\n group_name = group_name + \"_{}\"\n\n if group_size == 0:\n # Do not divide list of group_size is 0\n usersChunked = {group_name.format(1): rowList}\n\n else:\n chunks = [\n rowList[x : x + group_size] for x in range(0, len(rowList), group_size)\n ]\n\n for count, userListChunk in enumerate(chunks, start=1):\n usersChunked.update({group_name.format(count): userListChunk})\n\n # For each group in usersChunked, upload\n with click.progressbar(\n usersChunked,\n length=len(usersChunked),\n label=\"Groups Added\",\n show_eta=False,\n show_pos=True,\n ) as bar:\n for chunkName in bar:\n targetList = list()\n for user in usersChunked[chunkName]:\n targetList.append(\n User(\n first_name=user[\"First Name\"],\n last_name=user[\"Last Name\"],\n email=user[\"Email\"],\n position=user[\"Position\"],\n )\n )\n group = Group(name=chunkName, targets=targetList)\n\n group = api.groups.post(group)\n\n if isinstance(group, Error):\n click.secho(\n \"[!] {message}. Remediate the issue and try again.\".format(\n message=group.message\n ),\n fg=\"red\",\n bold=True,\n )\n raise click.Abort()\n\n if debug:\n click.echo(\"Group {} was successfully added.\".format(group.name))\n\n else:\n raise click.BadParameter(\n \"The section name '{}' doesn't appear to exist. Check the config file and try again.\".format(\n ctx.params[\"section_name\"]\n )\n )", "def report_mailer(accounts, days):\n account_names = _parse_accounts(accounts)\n sm_report_mailer(account_names, days)", "def send_main_email(self):\n\n print \"Sending main email\"\n \n # Make an html table to be body of email\n html_table = '<table style=\"font-size:12px\">'\n html_table += self.make_nfs_changed_rows(\"sprint\") # New features only\n html_table += self.make_nfs_changed_rows(\"status\") # New features only\n html_table += self.make_time_in_status_rows(self.stalled_nf_issues) \n html_table += self.make_time_in_status_rows(self.stalled_st_issues) # Sub-tasks\n html_table += '</table>' # Closing table tag\n\n recipients = self.config.get(\"recipients\", \"emails\").split(\"\\n\") # [recipients] section in .ini file\n \n# emails = self.config.items('recipients')\n# for key, email in emails:\n# recipients = ', '.join(self.config.items('recipients'))\n \n print recipients\n# sys.exit()\n self.send_email(recipients, html_table)", "def shops_procurement_email_csv(request):\n\n Order.objects.all().delete()\n Product.objects.all().delete()\n\n procurements = Procurement.objects.all()\n\n if procurements:\n response = HttpResponse(mimetype='text/csv')\n response['Content-Disposition'] = 'attachment; filename=procurement_%s.csv' % procurement_id\n\n for procurement in procurements:\n\n writer = csv.writer(response)\n\n writer.writerow([\n 'Vendor',\n 'Product',\n 'Variant',\n 'Quantity',\n 'Grams'])\n\n order_by_args = [\n 'product_variant__product__vendor',\n 'product_variant', ]\n procurement_items = procurement.procurementitem_set.all().order_by(*order_by_args)\n\n for procurement_item in procurement_items:\n writer.writerow([\n procurement_item.product_variant.product.vendor,\n str(procurement_item.product_variant.product),\n str(procurement_item.product_variant.option1),\n str((procurement_item.order_units) or ''),\n str((procurement_item.order_weight) or '')])\n\n return response", "def _send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status):\r\n # Get information from current task's request:\r\n task_id = subtask_status.task_id\r\n\r\n try:\r\n 
course_email = CourseEmail.objects.get(id=email_id)\r\n except CourseEmail.DoesNotExist as exc:\r\n log.exception(\"Task %s: could not find email id:%s to send.\", task_id, email_id)\r\n raise\r\n\r\n # Exclude optouts (if not a retry):\r\n # Note that we don't have to do the optout logic at all if this is a retry,\r\n # because we have presumably already performed the optout logic on the first\r\n # attempt. Anyone on the to_list on a retry has already passed the filter\r\n # that existed at that time, and we don't need to keep checking for changes\r\n # in the Optout list.\r\n if subtask_status.get_retry_count() == 0:\r\n to_list, num_optout = _filter_optouts_from_recipients(to_list, course_email.course_id)\r\n subtask_status.increment(skipped=num_optout)\r\n\r\n course_title = global_email_context['course_title']\r\n subject = \"[\" + course_title + \"] \" + course_email.subject\r\n from_addr = _get_source_address(course_email.course_id, course_title)\r\n\r\n course_email_template = CourseEmailTemplate.get_template()\r\n try:\r\n connection = get_connection()\r\n connection.open()\r\n\r\n # Define context values to use in all course emails:\r\n email_context = {'name': '', 'email': ''}\r\n email_context.update(global_email_context)\r\n\r\n while to_list:\r\n # Update context with user-specific values from the user at the end of the list.\r\n # At the end of processing this user, they will be popped off of the to_list.\r\n # That way, the to_list will always contain the recipients remaining to be emailed.\r\n # This is convenient for retries, which will need to send to those who haven't\r\n # yet been emailed, but not send to those who have already been sent to.\r\n current_recipient = to_list[-1]\r\n email = current_recipient['email']\r\n email_context['email'] = email\r\n email_context['name'] = current_recipient['profile__name']\r\n\r\n # Construct message content using templates and context:\r\n plaintext_msg = course_email_template.render_plaintext(course_email.text_message, email_context)\r\n html_msg = course_email_template.render_htmltext(course_email.html_message, email_context)\r\n\r\n # Create email:\r\n email_msg = EmailMultiAlternatives(\r\n subject,\r\n plaintext_msg,\r\n from_addr,\r\n [email],\r\n connection=connection\r\n )\r\n email_msg.attach_alternative(html_msg, 'text/html')\r\n\r\n # Throttle if we have gotten the rate limiter. This is not very high-tech,\r\n # but if a task has been retried for rate-limiting reasons, then we sleep\r\n # for a period of time between all emails within this task. Choice of\r\n # the value depends on the number of workers that might be sending email in\r\n # parallel, and what the SES throttle rate is.\r\n if subtask_status.retried_nomax > 0:\r\n sleep(settings.BULK_EMAIL_RETRY_DELAY_BETWEEN_SENDS)\r\n\r\n try:\r\n log.debug('Email with id %s to be sent to %s', email_id, email)\r\n\r\n with dog_stats_api.timer('course_email.single_send.time.overall', tags=[_statsd_tag(course_title)]):\r\n connection.send_messages([email_msg])\r\n\r\n except SMTPDataError as exc:\r\n # According to SMTP spec, we'll retry error codes in the 4xx range. 
5xx range indicates hard failure.\r\n if exc.smtp_code >= 400 and exc.smtp_code < 500:\r\n # This will cause the outer handler to catch the exception and retry the entire task.\r\n raise exc\r\n else:\r\n # This will fall through and not retry the message.\r\n log.warning('Task %s: email with id %s not delivered to %s due to error %s', task_id, email_id, email, exc.smtp_error)\r\n dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])\r\n subtask_status.increment(failed=1)\r\n\r\n except SINGLE_EMAIL_FAILURE_ERRORS as exc:\r\n # This will fall through and not retry the message.\r\n log.warning('Task %s: email with id %s not delivered to %s due to error %s', task_id, email_id, email, exc)\r\n dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])\r\n subtask_status.increment(failed=1)\r\n\r\n else:\r\n dog_stats_api.increment('course_email.sent', tags=[_statsd_tag(course_title)])\r\n if settings.BULK_EMAIL_LOG_SENT_EMAILS:\r\n log.info('Email with id %s sent to %s', email_id, email)\r\n else:\r\n log.debug('Email with id %s sent to %s', email_id, email)\r\n subtask_status.increment(succeeded=1)\r\n\r\n # Pop the user that was emailed off the end of the list only once they have\r\n # successfully been processed. (That way, if there were a failure that\r\n # needed to be retried, the user is still on the list.)\r\n to_list.pop()\r\n\r\n except INFINITE_RETRY_ERRORS as exc:\r\n dog_stats_api.increment('course_email.infinite_retry', tags=[_statsd_tag(course_title)])\r\n # Increment the \"retried_nomax\" counter, update other counters with progress to date,\r\n # and set the state to RETRY:\r\n subtask_status.increment(retried_nomax=1, state=RETRY)\r\n return _submit_for_retry(\r\n entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=True\r\n )\r\n\r\n except LIMITED_RETRY_ERRORS as exc:\r\n # Errors caught here cause the email to be retried. The entire task is actually retried\r\n # without popping the current recipient off of the existing list.\r\n # Errors caught are those that indicate a temporary condition that might succeed on retry.\r\n dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])\r\n # Increment the \"retried_withmax\" counter, update other counters with progress to date,\r\n # and set the state to RETRY:\r\n subtask_status.increment(retried_withmax=1, state=RETRY)\r\n return _submit_for_retry(\r\n entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=False\r\n )\r\n\r\n except BULK_EMAIL_FAILURE_ERRORS as exc:\r\n dog_stats_api.increment('course_email.error', tags=[_statsd_tag(course_title)])\r\n num_pending = len(to_list)\r\n log.exception('Task %s: email with id %d caused send_course_email task to fail with \"fatal\" exception. %d emails unsent.',\r\n task_id, email_id, num_pending)\r\n # Update counters with progress to date, counting unsent emails as failures,\r\n # and set the state to FAILURE:\r\n subtask_status.increment(failed=num_pending, state=FAILURE)\r\n return subtask_status, exc\r\n\r\n except Exception as exc:\r\n # Errors caught here cause the email to be retried. The entire task is actually retried\r\n # without popping the current recipient off of the existing list.\r\n # These are unexpected errors. 
Since they might be due to a temporary condition that might\r\n # succeed on retry, we give them a retry.\r\n dog_stats_api.increment('course_email.limited_retry', tags=[_statsd_tag(course_title)])\r\n log.exception('Task %s: email with id %d caused send_course_email task to fail with unexpected exception. Generating retry.',\r\n task_id, email_id)\r\n # Increment the \"retried_withmax\" counter, update other counters with progress to date,\r\n # and set the state to RETRY:\r\n subtask_status.increment(retried_withmax=1, state=RETRY)\r\n return _submit_for_retry(\r\n entry_id, email_id, to_list, global_email_context, exc, subtask_status, skip_retry_max=False\r\n )\r\n\r\n else:\r\n # All went well. Update counters with progress to date,\r\n # and set the state to SUCCESS:\r\n subtask_status.increment(state=SUCCESS)\r\n # Successful completion is marked by an exception value of None.\r\n return subtask_status, None\r\n finally:\r\n # Clean up at the end.\r\n connection.close()", "def raw_csv_app_2w(request):\n two_weeks = datetime.date.today() - datetime.timedelta(days=14)\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = 'atachment; filename = \"raw-powerbi-app-2w.csv\"'\n app_er = App_error.objects.filter(event_date__gt=two_weeks)\n app_w = App_warning.objects.filter(event_date__gt=two_weeks)\n app_crit = App_critical.objects.filter(event_date__gt=two_weeks)\n writer = csv.writer(response)\n for line in app_er:\n writer.writerow([line.event_id, line.event_source, line.event_description,\n line.machine_name, line.events_count, line.event_user, line.event_date, 'app error'])\n for line in app_w:\n writer.writerow([line.event_id, line.event_source, line.event_description,\n line.machine_name, line.events_count, line.event_user, line.event_date, 'app warning'])\n for line in app_crit:\n writer.writerow([line.event_id, line.event_source, line.event_description,\n line.machine_name, line.events_count, line.event_user, line.event_date, 'app critical'])\n\n return response", "def email(args):\n if args.name:\n add_user(name=args.name, email_address=args.email)\n\n if args.add_term:\n Feed(Config.database).add_search_term(email_address=args.email,\n search_term=args.add_term.upper())\n if args.terms_from_file:\n with open(args.terms_from_file) as file:\n for line in file:\n Feed(Config.database).add_search_term(email_address=args.email,\n search_term=line.strip().upper())\n if args.remove_term:\n Feed(Config.database).remove_search_term(email_address=args.email,\n term=args.remove_term)", "def sendEmail(request, names):\n datas = ()\n i = 1\n for name in [name for name in names.split(',')]:\n # user1 = get_object_or_404(User, username='徐超伟')\n # print(user1.email)\n if name:\n # print(name)\n user = get_object_or_404(User, username__exact=name)\n if not user.email:\n request.session['res'] = '0'\n # print(res)\n return HttpResponseRedirect(reverse('catalog:all-borrowed'))\n\n message = (u'还书提示', u'你已经超出了还书期限,请尽快归还图书。',\n 'LocalLibrarySystem<[email protected]>', [user.email])\n datas += (message,)\n\n res = send_mass_mail(datas, fail_silently=False,)\n # print(res)\n request.session['res'] = res\n return HttpResponseRedirect(reverse('catalog:all-borrowed'))", "def get_buyer_emails():\n sales_data = data_manager.get_table_from_file(\"sales/sales.csv\")\n return {(crm.get_name_by_id(row[CUSTOMER_ID]), crm.get_email_by_id(row[CUSTOMER_ID])) for row in sales_data}" ]
[ "0.5723836", "0.556769", "0.5490822", "0.54697037", "0.54161894", "0.5410227", "0.5337697", "0.53324795", "0.5231842", "0.52236265", "0.5195237", "0.5186955", "0.51598907", "0.51523805", "0.5090487", "0.508662", "0.5077356", "0.50331646", "0.50279236", "0.50225353", "0.5014937", "0.50014985", "0.496455", "0.48563868", "0.48427927", "0.48389235", "0.48273385", "0.48131394", "0.47965944", "0.47829708" ]
0.7534394
0
Writes data from instream into additionally allocated clusters of the given file. Metadata of this file will be stored in a Metadata object.
def write(self, instream: typ.BinaryIO, filepath: str, filename: str = None) -> None: if filename is not None: filename = path.basename(filename) if self.fs_type == 'FAT': allocator_metadata = self.fs.write(instream, filepath) self.metadata.add_file(filename, allocator_metadata) elif self.fs_type == 'NTFS': allocator_metadata = self.fs.write(instream, filepath) self.metadata.add_file(filename, allocator_metadata) else: raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __write(self, storage):\n\n positions = storage.get_positions()\n if len(positions) == 0: return\n\n X = storage.get_X()\n Y = storage.get_Y()\n\n if Y: assert len(positions) == len(X) == len(Y)\n else: assert len(positions) == len(X)\n\n start, end = positions[0][0][0], positions[-1][-1][0]\n\n group = self.f.create_group(f'{storage.name}_{start}-{end}')\n group['positions'] = positions\n\n if Y: group['labels'] = Y\n\n group.attrs['contig'] = storage.name\n group.attrs['size'] = len(positions)\n\n group.create_dataset('examples', data=X, chunks=(1, 200, 90))", "def WriteClustersToImage(self):\n # Use the array we built earlier\n print(f\"Writing the following list of clusters to FAT structure: {self.cluster_list}\")\n padding = 3\n with open(self.output_file, \"r+b\") as fh:\n # The first cluster goes into offset 26 (2 Bytes) in root directory\n seeker = (self.root_directory_offset*self.sector_size)+((self.index_number-1)*self.directory_index_size)+(self.starting_cluster_offset)\n # Convert first item in list to two bytes\n first_address = (self.cluster_list[0]).to_bytes(2, byteorder='little')\n print(f\"If I were me, I'd write {first_address} to {seeker}\")\n fh.seek(seeker)\n fh.write(first_address)\n # Now, the rest are written to FAT area\n for i, item in enumerate(self.cluster_list):\n # If Entry 1 then the byte calculation returned a whole number\n # If Entry 2 then the byte calculation returned a half number\n # This item determines where we write the data\n entry1, entry2, seeker = self.IsEntryHighOrLow(item)\n # The data we are writing is the next item\n if i+1 >= len(self.cluster_list):\n next_item = 4095\n else:\n next_item = self.cluster_list[i+1]\n # If we're at the end of the list then write 0xfff\n print(f\"Ready to perform calculations on {next_item} (hex:{hex(next_item)}) [entry1={entry1}; entry2={entry2}, seeker={seeker}]\")\n fh.seek(seeker)\n my_bytes = b'\\x00'+fh.read(3)\n if self.debug:\n print(f\"bytes from disk image: {my_bytes}\")\n unpacked_bytes, = struct.unpack('>I', bytes(my_bytes))\n if self.debug:\n print(type(unpacked_bytes), unpacked_bytes)\n nstr = str(hex(unpacked_bytes)).replace('0x', '').zfill(6)\n le_three_bytes = \"\".join(map(str.__add__, nstr[-2::-2] ,nstr[-1::-2]))\n if self.debug:\n print(f\"Existing values: unpacked_bytes:{hex(unpacked_bytes)}|nstr:{nstr}|(le)three_bytes:{le_three_bytes}|Entry1={le_three_bytes[-3:]}|Entry2={le_three_bytes[:3]}\")\n if entry1:\n # We need to deal with entry1 (see page 7 of scan24 paper)\n if self.debug:\n print(\"Updating entry1\")\n entry1_bytes = hex(next_item)[2:].zfill(3)\n entry2_bytes = le_three_bytes[:3]\n else:\n if self.debug:\n print(\"Updating entry2\")\n entry1_bytes = le_three_bytes[-3:]\n entry2_bytes = hex(next_item)[2:].zfill(3)\n new_entry = f\"{entry2_bytes}{entry1_bytes}\"\n if self.debug:\n print(f\"new_entry: {new_entry}\")\n packed_bytes = struct.pack('<I', int(new_entry, 16))\n if self.debug:\n print(f\"Writing packed_bytes ({packed_bytes[:-1]}) to {seeker}\")\n fh.seek(seeker)\n fh.write(packed_bytes[:-1])\n print(f\"{self.filename}.{self.extension} written to root directory index #{self.index_number}\")\n return True", "def __write_matlab_clusters(tel, filename):\n # type: (TelescopeAnalysis, str) -> None\n centre_x = np.array([])\n centre_y = np.array([])\n points_x = np.array([])\n points_y = np.array([])\n for name in tel.layouts:\n if name == 'ska1_v5':\n continue\n layout = tel.layouts[name]\n centre_x = np.hstack((centre_x, layout['cx']))\n centre_y = np.hstack((centre_y, 
layout['cy']))\n if points_x.size == 0:\n points_x = layout['x']\n points_y = layout['y']\n else:\n points_x = np.vstack((points_x, layout['x']))\n points_y = np.vstack((points_y, layout['y']))\n savemat(filename, dict(centre_x=centre_x, centre_y=centre_y,\n antennas_x=points_x, antennas_y=points_y))", "def insert_bicluster_info( self, db, db_file, run2id, row2id, col2id ):\n\t\t# Get all biclusters from cmonkey run\n\t\tconn = sqlite3.connect(db_file)\n\t \tc = conn.cursor()\n\t \tc.execute(\"SELECT max(iteration) FROM cluster_stats;\")\n\t \tlast_run = c.fetchone()[0] # i think there is an indexing problem in cMonkey python!! \n\t \tw = (last_run,)\n\t \tc.execute(\"SELECT cluster FROM cluster_stats WHERE iteration = ?;\",w)\n\t\tbiclusters = [self.assemble_bicluster_info_single( db, db_file, c, last_run, i[0], run2id, row2id, col2id ) for i in c.fetchall()]\n\t\tbicluster_info_collection = self.db.bicluster_info\n\n\t\t# Check whether documents are already present in the collection before insertion\n\t\tif bicluster_info_collection.count() > 0:\n\t\t\td_f = filter( None, [ self.check4existence( bicluster_info_collection, i, \"run_id\", i[\"run_id\"], \"cluster\", i[\"cluster\"] ) for i in biclusters ] )\n\t\telse:\n\t\t\td_f = biclusters\n\t\t\n\n\t\tprint \"%s new records to write\" % len( d_f )\n\n\t\tif len(d_f) > 0:\n\t\t\tbicluster_info_collection.insert( d_f )\n\n\t\treturn bicluster_info_collection", "def writePointwiseData(self, writeTo):\n rlz = self._writeSegmentsRealization(writeTo)\n # add some cluster stuff\n # cluster features\n ## both scaled and unscaled\n featureNames = sorted(list(self._clusterInfo['features']['unscaled'].keys()))\n for scaling in ['unscaled','scaled']:\n for name in featureNames:\n varName = 'ClusterFeature|{}|{}'.format(name, scaling)\n writeTo.addVariable(varName, np.array([]), classify='meta', indices=['segment_number'])\n rlz[varName] = np.asarray(self._clusterInfo['features'][scaling][name])\n varName = 'ClusterLabels'\n writeTo.addVariable(varName, np.array([]), classify='meta', indices=['segment_number'])\n rlz[varName] = np.asarray(self._clusterInfo['labels'])\n writeTo.addRealization(rlz)", "def clustering(self): \n clusterOfFiles=self.getClusters()\n \n #group files based on the hash of their contents\n self.keyingMethod=md5Hash\n [self.addFile(afile) for acluster in clusterOfFiles for afile in acluster]\n clusterOfFiles=self.getClusters()\n self.showClusters(clusterOfFiles)", "def ior_write_dataset(self):\n for oclass in self.obj_class:\n for sizes in self.ior_chu_trs_blk_size:\n # Skip the object type if server count does not meet the minimum\n # EC object server count\n if oclass[1] > self.server_count:\n continue\n self.ior_param_update(oclass, sizes)\n\n # Create the new container with correct redundancy factor\n # for EC object type\n self.ec_contaier_create(oclass[0])\n self.update_ior_cmd_with_pool(oclass=oclass[0],\n create_cont=False)\n # Start IOR Write\n self.container.uuid = self.ec_container.uuid\n self.start_ior_load(operation=\"WriteRead\", percent=1,\n create_cont=False)\n self.cont_uuid.append(self.ior_cmd.dfs_cont.value)", "def add_file(self, letter, block_size):\n cluster = 1\n i = 0\n j = 0\n\n continuous = True\n while(i<self.size and j<block_size):\n if(self.disk_mem[i]==\".\"):\n self.disk_mem[i] = letter\n if not continuous:\n continuous = True\n cluster += 1\n j+=1\n else:\n continuous = False\n i+=1\n return cluster", "def seek_to_cluster(self, cluster):\n 
self.infile.seek(self.cluster_to_physical_offset(cluster))", "def write_file(self,f=None):\n nrow, ncol, nlay, nper = self.parent.nrow_ncol_nlay_nper\n # Open file for writing\n if f is None:\n f = open(self.fn_path, 'w')\n # First line: heading\n f.write('{}\\n'.format(self.heading))\n # write dataset 1\n f.write('{} {} {} {} {} {} {}\\n'.format(self.ipakcb, self.iswtoc,\n self.nsystm, self.ithk,\n self.ivoid, self.istpcs,\n self.icrcc))\n # write dataset 2\n t = self.lnwt.array\n for tt in t:\n f.write('{} '.format(tt + 1))\n f.write('\\n')\n\n # write dataset 3\n f.write(\n '{} {} {} {} {} {} {} {} {} {}\\n'.format(self.izcfl, self.izcfm,\n self.iglfl, self.iglfm,\n self.iestfl, self.iestfm,\n self.ipcsfl, self.ipcsfm,\n self.istfl, self.istfm))\n\n # write dataset 4\n f.write(self.gl0.get_file_entry())\n\n # write dataset 5\n f.write(self.sgm.get_file_entry())\n\n # write dataset 6\n f.write(self.sgs.get_file_entry())\n\n # write datasets 7 to 13\n for k in range(self.nsystm):\n f.write(self.thick[k].get_file_entry())\n if self.icrcc != 0:\n f.write(self.sse[k].get_file_entry())\n f.write(self.ssv[k].get_file_entry())\n else:\n f.write(self.cr[k].get_file_entry())\n f.write(self.cc[k].get_file_entry())\n f.write(self.void[k].get_file_entry())\n f.write(self.sub[k].get_file_entry())\n\n # write datasets 14 and 15\n for k in range(nlay):\n if self.istpcs != 0:\n f.write(self.pcsoff[k].get_file_entry())\n else:\n f.write(self.pcs[k].get_file_entry())\n\n # write dataset 16 and 17\n if self.iswtoc > 0:\n # dataset 16\n for i in self.ids16:\n f.write('{} '.format(i))\n f.write(' #dataset 16\\n')\n\n # dataset 17\n for k in range(self.iswtoc):\n t = self.ids17[k, :].copy()\n t[0:4] += 1\n for i in t:\n f.write('{} '.format(i))\n f.write(' #dataset 17 iswtoc {}\\n'.format(k + 1))\n\n # close swt file\n f.close()", "def write(self, file):\n pos = file.tell()\n pickle.dump((self.index, self.meta, self.info), file)\n file.seek(0)\n\n # update the header with the position of the content index.\n file.write(struct.pack('<Q', pos))", "def cluster_data(data_loc, num_clusters, base_destination, vectorizer):\n cluster_df = __title_cluster_df(data_loc, num_clusters, vectorizer)\n if not os.path.isdir(base_destination):\n os.mkdir(base_destination)\n vec_path = os.path.join(base_destination, 'vectorizer.pkl')\n with open(vec_path, 'wb') as f:\n pickle.dump(vectorizer, f)\n cluster_stats = {}\n for i in range(num_clusters):\n titles = cluster_df[cluster_df['cluster']==i]['title']\n cluster_stats[i] = titles.shape[0]\n cluster_data = __get_data_with_titles(data_loc, titles)\n dest = os.path.join(base_destination, 'cluster_{}.json'.format(i))\n with open(dest, 'w') as f:\n json.dump(cluster_data, f)\n stats_path = os.path.join(base_destination, 'cluster_statistics.txt')\n with open(stats_path, 'w') as f:\n for cluster in cluster_stats.keys():\n f.write('cluster {}: '.format(cluster))\n f.write(str(cluster_stats[cluster]) + '\\n')", "def store_clusters(mapping, sff_fp, outdir=\"/tmp/\", store_members=False):\r\n\r\n # get mapping read to cluster\r\n invert_map = invert_mapping(mapping)\r\n (flowgrams, header) = lazy_parse_sff_handle(open(sff_fp))\r\n\r\n leftover_fasta_fh = open(outdir + \"/singletons.fasta\", \"w\")\r\n centroids = []\r\n for f in flowgrams:\r\n try:\r\n key = invert_map[f.Name]\r\n except KeyError:\r\n # this flowgram has not been clustered\r\n continue\r\n if (len(mapping[key]) == 0):\r\n # do not store singletons in a separate cluster\r\n leftover_fasta_fh.write(f.toFasta() + 
\"\\n\")\r\n continue\r\n elif(f.Name in mapping):\r\n # save as a centroid\r\n centroids.append((len(mapping[f.Name]) + 1, f.Name, f.toSeq()))\r\n\r\n if (store_members):\r\n flows_fh = open(outdir + key + \".flows\", \"a\")\r\n fasta_fh = open(outdir + key + \".fasta\", \"a\")\r\n flows_fh.write(\"%s\\n\" % f)\r\n fasta_fh.write(f.toFasta() + \"\\n\")\r\n fasta_fh.close()\r\n flows_fh.close()\r\n\r\n leftover_fasta_fh.close()\r\n\r\n # sort and store ordered by cluster_size\r\n centroids.sort(reverse=True)\r\n centroid_fh = open(outdir + \"/centroids.fasta\", \"w\")\r\n for size, name, seq in centroids:\r\n centroid_fh.write(\">%s | cluster size: %d \\n%s\\n\" %\r\n (name, size, seq))\r\n centroid_fh.close()", "def exportFlatClusterData(filename, new_row_header,new_column_header,xt,ind1,ind2):\n\n filename = string.replace(filename,'.pdf','.txt')\n export_text = open(filename,'w')\n column_header = string.join(['UID','row_clusters-flat']+new_column_header,'\\t')+'\\n' ### format column-names for export\n export_text.write(column_header)\n column_clusters = string.join(['column_clusters-flat','-']+ map(str, ind2),'\\t')+'\\n' ### format column-flat-clusters for export\n export_text.write(column_clusters)\n\n ### The clusters, dendrogram and flat clusters are drawn bottom-up, so we need to reverse the order to match\n new_row_header = new_row_header[::-1]\n xt = xt[::-1]\n\n ### Export each row in the clustered data matrix xt\n i=0\n for row in xt:\n export_text.write(string.join([new_row_header[i],str(ind1[i])]+map(str, row),'\\t')+'\\n')\n i+=1\n export_text.close()\n\n ### Transpose text file for easier reading!\n oldfile_h = open(filename, 'rb')\n\n elements = [ line.split() for line in oldfile_h ]\n oldfile_h.close()\n\n biglist = []\n for splitline in elements:\n #print len(splitline)\n #print splitline\n biglist.append(splitline)\n newarray = numpy.array(biglist)\n #print numpy.shape(newarray)\n t_array = newarray.transpose()\n #print numpy.shape(t_array)\n #print newarray[:,0]\n\n newfile_h = open(filename[:-4] + \"_transposed.txt\" , 'w')\n for row in t_array:\n #print \"The row is currently: %r\" % row\n newfile_h.write(\"\\t\".join(row) + \"\\n\")\n newfile_h.close()\n\n\n ### Export as CDT file\n filename = string.replace(filename,'.txt','.cdt')\n export_cdt = open(filename,'w')\n column_header = string.join(['UNIQID','NAME','GWEIGHT']+new_column_header,'\\t')+'\\n' ### format column-names for export\n export_cdt.write(column_header)\n eweight = string.join(['EWEIGHT','','']+ ['1']*len(new_column_header),'\\t')+'\\n' ### format column-flat-clusters for export\n export_cdt.write(eweight)\n\n ### Export each row in the clustered data matrix xt\n i=0\n for row in xt:\n export_cdt.write(string.join([new_row_header[i]]*2+['1']+map(str, row),'\\t')+'\\n')\n i+=1\n export_cdt.close()", "def proc_dataset_v2(write=False):\n \n path = load_config()\n M = pd.read_csv(path['metadata_file'])\n T = pd.read_csv(path['rna_file'])\n mCH = pd.read_csv(path['mCH_file'])\n CH = pd.read_csv(path['CH_file'])\n\n def format_df(df):\n \"\"\"The inputs are genes x cells. 
Transpose data and rename columns\"\"\"\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df\n\n T = format_df(T)\n mCH = format_df(mCH)\n CH = format_df(CH)\n\n #Update metadata\n M = pd.read_csv(path['metadata_file'])\n M.rename(columns={'Unnamed: 0': 'sample_id'}, inplace=True)\n M.set_index(keys='sample_id', drop=True, inplace=True)\n\n #Sort cells by metadata\n sorted_index = M.sort_values(by='SubClusterAnno').index\n M = M.loc[sorted_index]\n T = T.loc[sorted_index]\n mCH = mCH.loc[sorted_index]\n CH = CH.loc[sorted_index]\n\n assert np.array_equal(CH.columns, mCH.columns), \"Genes are not in the same order\"\n assert np.array_equal(T.columns, mCH.columns), \"Genes are not in the same order\"\n assert M.index.equals(T.index), \"Cells are not in the same order\"\n assert M.index.equals(CH.index), \"Cells are not in the same order\"\n assert M.index.equals(mCH.index), \"Cells are not in the same order\"\n\n # CPM-normalize counts\n X = T.values.astype(np.float32)\n X = (X/np.sum(X, axis=1, keepdims=True))*1e6\n X = np.log1p(X)\n T = pd.DataFrame(data=X, index=T.index, columns=T.columns)\n print('Completed T processing')\n\n # For methylation data\n X = CH.values.astype(float)\n Y = mCH.values.astype(float)\n Z = np.divide(Y,X+1e-10)\n E = pd.DataFrame(data=Z, index=mCH.index, columns=mCH.columns)\n print('Completed E processing')\n\n # select genes based on variance in log normalized CPM values in T\n def calc_highvar_genes(df):\n vars = np.var(df.values, axis=0)\n order_vars = np.argsort(-vars) # descending order\n sorted_highvar_genes = df.columns.values[order_vars]\n return sorted_highvar_genes\n\n data = {'sorted_highvar_T_genes': calc_highvar_genes(T),\n 'sorted_highvar_E_genes': calc_highvar_genes(E)}\n print('Completed finding high variance features')\n\n if write:\n feather.write_dataframe(T, path['data_dir'] / 'T_dat_v2.feather')\n feather.write_dataframe(E, path['data_dir'] / 'E_dat_v2.feather')\n feather.write_dataframe(M, path['data_dir'] / 'Meta_v2.feather')\n sio.savemat(path['data_dir'] / 'highvar_genes_v2.mat', data)\n return T, E, M, data", "def save_cluster_metrics_on_check_point(self) -> None:\n pd.read_csv(f'{self.path_to_cluster_metrics}/{self.file_name}.csv')\\\n .append(pd.DataFrame(self.cluster_metrics,\n columns=['stream_index', 'timestamp', 'check point', 'cluster id',\n 'x', 'y', 'radius', 'weight', 'cluster type']))\\\n .to_csv(f'{self.path_to_cluster_metrics}/{self.file_name}.csv', index=False)\n self.cluster_metrics = []", "def write_metadata(self, data_set, io=None, location=None):\n if not hasattr(data_set, '_h5_base_group'):\n # added here because loop writes metadata before data itself\n data_set._h5_base_group = self._create_data_object(data_set)\n if 'metadata' in data_set._h5_base_group.keys():\n del data_set._h5_base_group['metadata']\n metadata_group = data_set._h5_base_group.create_group('metadata')\n self.write_dict_to_hdf5(data_set.metadata, metadata_group)", "def proc_dataset_v1(write=False):\n \n path = load_config()\n M = pd.read_csv(path['metadata_file'])\n T = pd.read_csv(path['rna_file'])\n mCH = pd.read_csv(path['mCH_file'])\n CH = pd.read_csv(path['CH_file'])\n\n def format_df(df):\n \"\"\"The inputs are genes x cells. 
Transpose data and rename columns\"\"\"\n df = df.transpose()\n df.rename(columns=df.iloc[0], inplace=True)\n df.drop('gene', inplace=True)\n df.index.rename('sample_id', inplace=True)\n return df\n\n T = format_df(T)\n mCH = format_df(mCH)\n CH = format_df(CH)\n\n #Update metadata\n M = pd.read_csv(path['metadata_file'])\n M.rename(columns={'Unnamed: 0': 'sample_id'}, inplace=True)\n M.set_index(keys='sample_id', drop=True, inplace=True)\n\n #Sort cells by metadata\n sorted_index = M.sort_values(by='SubClusterAnno').index\n M = M.loc[sorted_index]\n T = T.loc[sorted_index]\n mCH = mCH.loc[sorted_index]\n CH = CH.loc[sorted_index]\n\n assert np.array_equal(CH.columns, mCH.columns), \"Genes are not in the same order\"\n assert np.array_equal(T.columns, mCH.columns), \"Genes are not in the same order\"\n assert M.index.equals(T.index), \"Cells are not in the same order\"\n assert M.index.equals(CH.index), \"Cells are not in the same order\"\n assert M.index.equals(mCH.index), \"Cells are not in the same order\"\n\n # CPM-normalize counts\n X = T.values.astype(np.float32)\n X = (X/np.sum(X, axis=1, keepdims=True))*1e6\n X = np.log1p(X)\n T = pd.DataFrame(data=X, index=T.index, columns=T.columns)\n print('Completed T processing')\n\n # For methylation data\n X = CH.values.astype(float)\n Y = mCH.values.astype(float)\n Z = np.log1p(Y) - np.log1p(X)\n E = pd.DataFrame(data=Z, index=mCH.index, columns=mCH.columns)\n print('Completed E processing')\n\n # select genes based on variance in log normalized CPM values in T\n def calc_highvar_genes(df):\n vars = np.var(df.values, axis=0)\n order_vars = np.argsort(-vars) # descending order\n sorted_highvar_genes = df.columns.values[order_vars]\n return sorted_highvar_genes\n\n data = {'sorted_highvar_T_genes': calc_highvar_genes(T),\n 'sorted_highvar_E_genes': calc_highvar_genes(E)}\n print('Completed finding high variance features')\n\n if write:\n feather.write_dataframe(T, path['data_dir'] / 'T_dat.feather')\n feather.write_dataframe(E, path['data_dir'] / 'E_dat.feather')\n feather.write_dataframe(M, path['data_dir'] / 'Meta.feather')\n sio.savemat(path['data_dir'] / 'highvar_genes.mat', data)\n return T, E, M, data", "def import_and_save(ADCthres=0, s=False):\n df = import_data(ADCthres, s)\n bus_vec = np.array(range(0,3))\n for bus in bus_vec:\n df_clu = cluster_data(df, bus) \n save_clusters(df_clu, bus)", "def _make_data_file(cls, filename):\n cls.cluster.fs.setuser(cls.cluster.superuser)\n f = cls.cluster.fs.open(filename, \"w\")\n for x in range(256):\n f.write(\"%d\\t0x%x\\n\" % (x, x))\n f.close()", "def _put(self, name, chunk, row_offset):\n grp = self.grp[name]\n lo = row_offset\n if isinstance(chunk, pd.Series):\n chunk = chunk.to_frame()\n n_rows = len(chunk)\n else:\n n_rows = len(chunk[next(iter(chunk.keys()))])\n hi = lo + n_rows\n\n for name in chunk.keys():\n\n x = np.asarray(chunk[name])\n\n data, dtype, fillvalue = self._normalize_column(x, x.dtype)\n\n if name in grp.keys():\n dset = grp[name]\n if hi > len(dset):\n dset.resize((hi,))\n dset[lo:hi] = data\n else:\n try:\n enum_dict = h5py.check_dtype(enum=dtype)\n except AttributeError:\n enum_dict = None\n dset = grp.create_dataset(\n name,\n shape=(hi,),\n dtype=dtype,\n data=data,\n fillvalue=fillvalue,\n **self.storage_options\n )\n if enum_dict is not None:\n # store enum dictionary as attribute\n dset.attrs[\"categories\"] = sorted(\n enum_dict, key=enum_dict.__getitem__\n )", "def writeData( self, file, bAddBeginOfDataChunk = True ):\n self.writeSpecificData( file, 
self.data, bAddBeginOfDataChunk = bAddBeginOfDataChunk )", "def write_star_files(self, star_input, outpath):\n \n with open(star_input, 'r') as f:\n table = parse_star(f)\n\n cluster_star = {}\n\n for cluster, nodes in clusters.items():\n if nodes:\n #convert to str to match df\n #add 1 to match RELION indexing\n avgs = [str(node+1) for node in nodes]\n subset = table[table['ClassNumber'].isin(avgs)]\n cluster_star[cluster] = subset\n\n for cluster, table in cluster_star.items():\n with open(outpath+'/slicem_cluster_{0}.star'.format(cluster), 'w') as f:\n #write the star file\n print('data_', file=f)\n print('loop_', file=f)\n for i, name in enumerate(table.columns):\n print('_rln' + name + ' #' + str(i+1), file=f)\n table.to_csv(f, sep='\\t', index=False, header=False)\n\n with open(outpath+'/slicem_clusters.txt', 'w') as f:\n for cluster, averages in clusters.items():\n f.write(str(cluster) + '\\t' + str(averages) + '\\n')\n \n print('star files written!')", "def test_store_cluster(self):\r\n\r\n self.tmpdir = mkdtemp(dir=\"./\", suffix=\"_store_clusters/\")\r\n\r\n self.files_to_remove.append(self.tmpdir + \"singletons.fasta\")\r\n self.files_to_remove.append(self.tmpdir + \"centroids.fasta\")\r\n\r\n # empty map results in empty files\r\n store_clusters({}, self.tiny_test, self.tmpdir)\r\n actual_centroids = list(\r\n parse_fasta(open(self.tmpdir + \"centroids.fasta\")))\r\n self.assertEqual(actual_centroids, [])\r\n actual_singletons = list(\r\n parse_fasta(open(self.tmpdir + \"singletons.fasta\")))\r\n self.assertEqual(actual_singletons, [])\r\n\r\n # non-empty map creates non-empty files, centroids sorted by size\r\n mapping = {'FZTHQMS01B8T1H': [],\r\n 'FZTHQMS01DE1KN': ['FZTHQMS01EHAJG'],\r\n 'FZTHQMS01EHAJG': [1, 2, 3]} # content doesn't really matter\r\n\r\n centroids = [(\r\n 'FZTHQMS01EHAJG | cluster size: 4', 'CATGCTGCCTCCCGTAGGAGTTTGGACCGTGTCTCAGTTCCAATGTGGGGGACCTTCCTCTCAGAACCCCTATCCATCGAAGGTTTGGTGAGCCGTTACCTCACCAACTGCCTAATGGAACGCATCCCCATCGATAACCGAAATTCTTTAATAACAAGACCATGCGGTCTGATTATACCATCGGGTATTAATCTTTCTTTCGAAAGGCTATCCCCGAGTTATCGGCAGGTTGGATACGTGTTACTCACCCGTGCGCCGGTCGCCA'),\r\n ('FZTHQMS01DE1KN | cluster size: 2', 'CATGCTGCCTCCCGTAGGAGTTTGGACCGTGTCTCAGTTCCAATGTGGGGGACCTTCCTCTCAGAACCCCTATCCATCGAAGGTTTGGTGAGCCGTTACCTCACCAACTGCCTAATGGAACGCATCCCCATCGATAACCGAAATTCTTTAATAACAAGACCATGCGGTCTGATTATACCATCGGGTATTAATCTTTCTTTCGAAAGGCTATCCCCGAGTTATCGGCAGGTTGGATACGTGTTACTCACCCGTGCGCCGGTCGCCA')]\r\n\r\n singletons = [(\r\n 'FZTHQMS01B8T1H',\r\n 'CATGCTGCCTCCCGTAGGAGTTTGGACCGTGTCTCAGTTCCAATGTGGGGGACCTTCCTCTCAGAACCCCTATCCATCGAAGGTTTGGTGAGCCGTTACCTCACCAACTGCCTAATGGAACGCATCCCCATCGATAACCGAAATTCTTTAATAATTAAACCATGCGGTTTTATTATACCATCGGGTATTAATCTTTCTTTCGAAAGGCTATCCCCGAGTTATCGGCAGGTTGGATACGTGTTACTCACCCGTGCGCCGGTCGCCATCACTTA')]\r\n\r\n store_clusters(mapping, self.tiny_test, self.tmpdir)\r\n actual_centroids = list(\r\n parse_fasta(open(self.tmpdir + \"centroids.fasta\")))\r\n self.assertEqual(actual_centroids, centroids)\r\n actual_singletons = list(\r\n parse_fasta(open(self.tmpdir + \"singletons.fasta\")))\r\n self.assertEqual(actual_singletons, singletons)", "def write(self, file):\n #write header\n self.ID.write(file)\n if (self.write_size): \n self.size.write(file)\n for variable in self.variables:\n variable.write(file)\n for subchunk in self.subchunks:\n subchunk.write(file)", "def sent_or_doc_cluster(file_in, file_out, feature, method, n_cluster, show_or_write):\n\n original_file = file_in[0]\n original_words_file = file_in[1]\n file_vec = file_in[2]\n\n if feature.lower() == 'onehot':\n with 
open(file_vec, 'rb') as f_in:\n content_id = pickle.load(f_in)\n id_vec = pickle.load(f_in)\n id_onehot = pickle.load(f_in)\n x = []\n for i, onehot in id_onehot.items():\n x.append(onehot.tolist())\n\n X = np.array(x)\n\n if method.lower() == 'ap':\n instance = AffinityPropagation(affinity='cosine').fit(X)\n elif method.lower() == 'kmeans':\n instance = KMeans(n_cluster=n_cluster).fit(X)\n\n labels = instance.labels_.tolist()\n id_cluster = {}\n cluster_ids = {}\n for i in range(len(labels)):\n id_cluster[i] = labels[i]\n\n for i, cluster in id_cluster.items():\n if cluster not in cluster_ids:\n cluster_ids[cluster] = []\n cluster_ids[cluster].append(i)\n else:\n cluster_ids[cluster].append(i)\n pass\n if show_or_write == 'show':\n show(original_file, cluster_ids)\n else:\n keycontent_cluster_write_to_file(\n file_in=[original_file, original_words_file],\n file_out=file_out[0],\n id_cluster\n )\n keycontent_cluster_digest(\n file_in=[original_file, original_words_file],\n file_out=file_out[1],\n cluster_ids=cluster_ids\n )\n pass\n\n elif feature.lower() == 'vec':\n with open(file_vec, 'rb') as f_in:\n content_id = pickle.load(f_in)\n id_vec = pickle.load(f_in)\n id_onehot = pickle.load(f_in)\n x = []\n for i, vec in id_vec.items():\n x.append(vec.tolist()) # int object jas nor attribute 'tolist'\n\n X = np.array(x)\n\n if method.lower() == 'ap':\n instance = AffinityPropagation(affinity='cosine').fit(X)\n elif method.lower() == 'kmeans':\n instance = KMeans(n_clusters=n_cluster).fit(X)\n else:\n raise ValueError(\"Method must be 'ap' or \"\n \"'kmeans'. Got %s instead\"\n % method)\n\n labels = instance.labels_.tolist()\n id_cluster = {}\n cluster_ids = {}\n for i in range(len(labels)):\n id_cluster[i] = labels[i]\n\n for i, cluster in id_cluster.items():\n if cluster not in cluster_ids:\n cluster_ids[cluster] = []\n cluster_ids[cluster].append(i)\n else:\n cluster_ids[cluster].append(i)\n if show_or_write == 'show':\n show(original_file, cluster_ids)\n else:\n keycontent_cluster_write_to_file(\n file_in=[original_file, original_words_file],\n file_out=file_out[0],\n id_cluster\n )\n keycontent_cluster_digest(\n file_in=[original_file, original_words_file],\n file_out=file_out[1],\n cluster_ids=cluster_ids\n )\n pass\n elif feature.lower() == 'doc2vec':\n # word2vec.doc2vec\n pass\n else:\n raise ValueError(\n \"Feature must be 'onehot' or 'vec' or 'doc2vec'. 
Got %s instead\" % feature)\n pass\n\n pass", "def save(self, file: Union[str, BinaryIO]=None) -> bytes:\n # Store all the chunks data as zlib compressed nbt data\n chunks_data = []\n for chunk in self.chunks:\n if chunk is None:\n chunks_data.append(None)\n continue\n chunk_data = BytesIO()\n if isinstance(chunk, Chunk):\n nbt_data = nbt.NBTFile()\n nbt_data.tags.append(nbt.TAG_Int(name='DataVersion', value=chunk.version))\n nbt_data.tags.append(chunk.data)\n else:\n nbt_data = chunk.save()\n nbt_data.write_file(buffer=chunk_data)\n chunk_data.seek(0)\n chunk_data = zlib.compress(chunk_data.read())\n chunks_data.append(chunk_data)\n\n # This is what is added after the location and timestamp header\n chunks_bytes = bytes()\n offsets = []\n for chunk in chunks_data:\n if chunk is None:\n offsets.append(None)\n continue\n # 4 bytes are for length, b'\\x02' is the compression type which is 2 since its using zlib\n to_add = (len(chunk)+1).to_bytes(4, 'big') + b'\\x02' + chunk\n\n # offset in 4KiB sectors\n sector_offset = len(chunks_bytes) // 4096\n sector_count = math.ceil(len(to_add) / 4096)\n offsets.append((sector_offset, sector_count))\n\n # Padding to be a multiple of 4KiB long\n to_add += bytes(4096 - (len(to_add) % 4096))\n chunks_bytes += to_add\n\n locations_header = bytes()\n for offset in offsets:\n # None means the chunk is not an actual chunk in the region\n # and will be 4 null bytes, which represents non-generated chunks to minecraft\n if offset is None:\n locations_header += bytes(4)\n else:\n # offset is (sector offset, sector count)\n locations_header += (offset[0] + 2).to_bytes(3, 'big') + offset[1].to_bytes(1, 'big')\n\n # Set them all as 0\n timestamps_header = bytes(4096)\n\n final = locations_header + timestamps_header + chunks_bytes\n\n # Pad file to be a multiple of 4KiB in size\n # as Minecraft only accepts region files that are like that\n final += bytes(4096 - (len(final) % 4096))\n assert len(final) % 4096 == 0 # just in case\n\n # Save to a file if it was given\n if file:\n if isinstance(file, str):\n with open(file, 'wb') as f:\n f.write(final)\n else:\n file.write(final)\n return final", "def _distribute_data_to_cluster(self):\n\n for data in self.data:\n _distances = self._calculate_distances(data)\n _cluster = self._get_closest_cluster(_distances)\n self.clusters[_cluster].append(data)", "def updateAnnotations(self):\n self.backupDatafiles()\n print(\"Updating annotation files \", self.field(\"trainDir\"))\n listOfDataFiles = QDir(self.field(\"trainDir\")).entryList(['*.data'])\n for file in listOfDataFiles:\n # Read the annotation\n segments = Segment.SegmentList()\n newsegments = Segment.SegmentList()\n segments.parseJSON(os.path.join(self.field(\"trainDir\"), file))\n allSpSegs = np.arange(len(segments)).tolist()\n newsegments.metadata = segments.metadata\n for segix in allSpSegs:\n seg = segments[segix]\n if self.field(\"species\") not in [fil[\"species\"] for fil in seg[4]]:\n newsegments.addSegment(seg) # leave non-target segments unchanged\n else:\n for seg2 in self.segments:\n if seg2[1] == seg:\n # find the index of target sp and update call type\n seg[4][[fil[\"species\"] for fil in seg[4]].index(self.field(\"species\"))][\"calltype\"] = self.clusters[seg2[-1]]\n newsegments.addSegment(seg)\n newsegments.saveJSON(os.path.join(self.field(\"trainDir\"), file))", "def _processing( infile, rchr, dist, outf ):\n\n coords, sizes = build_dict(infile)\n qry_chrs = list(coords.keys())\n\n 
print(\"Primary\\tHaplotig\\tPrimary_Start\\tPrimary_end\\tHaplotig_Start\\tHaplotig_End\\tHaplotig_Length\", file=outf)\n for qchr in qry_chrs:\n refcoords = coords[qchr][0]\n qrycoords = coords[qchr][1]\n refst, refend, qryst, qryend = \\\n clustering( refcoords, sorted(qrycoords), sizes[qchr], dist )\n\n print(\"%s\\t%s\\t%d\\t%d\\t%d\\t%d\\t%d\" % \\\n (rchr, qchr, refst, refend, qryst, qryend, sizes[qchr]), file=outf)" ]
[ "0.56617075", "0.56513876", "0.561701", "0.55935436", "0.5525497", "0.5482356", "0.54683065", "0.54394776", "0.5432194", "0.5414132", "0.5392", "0.5390164", "0.53824586", "0.53770757", "0.5337196", "0.5310381", "0.5272025", "0.5262886", "0.5257946", "0.52343404", "0.52302665", "0.51557195", "0.5148239", "0.5117391", "0.51146483", "0.51024026", "0.5072759", "0.50514144", "0.50434846", "0.50263464" ]
0.60996723
0
Clears the slackspace of files. Information about them is stored in metadata.
def clear(self): if self.fs_type == 'FAT': for file_entry in self.metadata.get_files(): file_metadata = file_entry['metadata'] file_metadata = FATAllocatorMeta(file_metadata) self.fs.clear(file_metadata) elif self.fs_type == 'NTFS': for file_entry in self.metadata.get_files(): file_metadata = file_entry['metadata'] file_metadata = NTFSAllocatorMeta(file_metadata) self.fs.clear(file_metadata) else: raise NotImplementedError()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def clean():\n clean_files()", "def clean_files(self):\n self.filenames.clear()", "def clear(self):\n\n Console.info(\"Cleaning sprite files...\")\n Console.indent()\n \n for dirPath, dirNames, fileNames in os.walk(self.base):\n for fileName in fileNames:\n if fileName.startswith(\"jasysprite\"):\n filePath = os.path.join(dirPath, fileName)\n Console.debug(\"Removing file: %s\", filePath)\n os.remove(filePath)\n \n Console.outdent()", "def clean(self):\r\n\r\n for _, data in self.composition.items():\r\n index_file = Path(data['file'] + '.fxi')\r\n if index_file.exists():\r\n index_file.unlink()", "def clear_data():\n for i in range(_MAX_NUM_TESTS):\n rand, ref = filename(i)\n if os.path.exists(rand):\n os.remove(rand)\n if os.path.exists(ref):\n os.remove(ref)", "def cleanup(self):\r\n for f in [i for d in self.data.values() for i in d[\"filenames\"]]:\r\n try:\r\n os.unlink(f)\r\n except Exception: pass\r\n self.Destroy()", "def clean_filesystem(files=[]):\n remove_files(files + find_cache_files())", "def clear_lists(self): \n self.fp_config_files = []\n self.txt_files = []\n self.fr_config_files = []", "def __del__(self):\n for filename in self.files:\n unlink(filename)", "def cleanup_files(self):\n\n self.backup_files()\n self.delete_files()", "def clear():", "def clean(self):\n\t\tself.archiver.closeFile()", "def _clear_audio_files(self):\n try:\n shutil.rmtree(self.audio_file_folder)\n except:\n print('Failure to clear audio files in {self.audio_file_folder}')", "def clear_all(self):\n self.clear_files_paths()\n self.clear_programs()", "def clear_files_paths(self):\n del self.__files_paths[:]", "def reset_memory(self, path):\n files_to_delete = os.listdir(path)\n for file in files_to_delete:\n os.remove(path + \"/\" + str(file))\n self.term_dictionary.clear()", "def erase_files(self):\n self.ofile_handle()\n self.efile_handle()\n\n os.remove(self.ofile_name())\n os.remove(self.efile_name())\n return None", "def _clear_variables( self ):\r\n self.navigation = None\r\n self.resPath = None\r\n self.resolutions = None\r\n self.currentResolution = None\r\n self.resolution = None\r\n for doc in self.include_doc:\r\n try: doc.unlink()\r\n except: pass", "def clear_data():\n dir_list = [\"generated/*\", \"pub/static/*\", \"var/cache/*\", \"var/page_cache/*\", \"var/view_preprocessed/*\", \"var/tmp/*\"]\n\n for item in dir_list:\n print(\"[ - ] Removing\", item, \"\\n\")\n subprocess.run([\"rm\", \"-rf\", item])", "def clear(self, cacheDir):", "def clear_old_files(self):\n self.logger.logMsg(\"Clearing Old Files.....\")\n try:\n for files in os.listdir(self.download_path):\n path = os.path.join(self.download_path, files)\n os.remove(path)\n for files in os.listdir(self.outpath):\n path = os.path.join(self.outpath, files)\n os.remove(path)\n except Exception as e:\n self.logger.logError(\"Error Creating Old Files {}.....\".format(str(e)))\n raise Exception('Error in Clearing Old Files')\n\n self.logger.logMsg(\"Done Clearing Old Files.....\")", "def delFiles(self):\r\n \r\n logStr = \"{0:s}.{1:s}: \".format(self.__class__.__name__, sys._getframe().f_code.co_name)\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'Start.')) \r\n \r\n try: \r\n if os.path.exists(self.h5File): \r\n os.remove(self.h5File) \r\n logger.debug(\"{0:s} File {1:s} deleted.\".format(logStr,self.h5File)) \r\n except XmError:\r\n raise \r\n except Exception as e:\r\n logStrFinal=\"{:s}Exception: Line: {:d}: {!s:s}: {:s}\".format(logStr,sys.exc_info()[-1].tb_lineno,type(e),str(e))\r\n logger.error(logStrFinal) \r\n raise 
XmError(logStrFinal) \r\n finally:\r\n logger.debug(\"{0:s}{1:s}\".format(logStr,'_Done.'))", "def _clean_files(self):\n if self.delfiles & 1:\n ProcUtils.remove(self.okm)\n if self.delfiles & 2:\n ProcUtils.remove(self.hkm)\n if self.delfiles & 4:\n ProcUtils.remove(self.qkm)\n if self.delfiles & 8:\n ProcUtils.remove(self.obc)\n\n if self.log is False:\n ProcUtils.remove(self.pcf_file)\n base = os.path.basename(self.okm)\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogReport', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogStatus', base])))\n ProcUtils.remove(os.path.join(self.dirs['run'],\n '.'.join(['LogUser', base])))", "def _cleanup(self):\n os.system(\"rm -r %s/*\" %(self._snippet_index_dir))\n os.system(\"rm %s/*\" %(self._para_dir))\n os.system(\"rm %s/*\" %(self._temp_dir))\n os.system(\"rm %s/*\" %(self._snippet_result_dir))", "def clean(self):\n\n for metric in self.metricList:\n listf = glob.glob(\n '{}/*_{}_{}*'.format(self.outDir, metric.name, self.num))\n if len(listf) > 0:\n for val in listf:\n os.system('rm {}'.format(val))", "def _clean_workdir(self):\n\t\ttoremove = [self._get_config_filepath(), self._get_params_filepath(), self._get_conv_filepath(), self._get_psf_filepath()]\n\t\tfor filepath in toremove:\n\t\t\tif os.path.exists(filepath):\t\n\t\t\t\tlogger.debug(\"Removing existing file %s...\" % (filepath))\n\t\t\t\tos.remove(filepath)", "def __del__(self):\n for f in self._files:\n f.close()", "def clean(self):\n files = ['CHG', 'CHGCAR', 'POSCAR', 'INCAR', 'CONTCAR',\n 'DOSCAR', 'EIGENVAL', 'IBZKPT', 'KPOINTS', 'OSZICAR',\n 'OUTCAR', 'PCDAT', 'POTCAR', 'vasprun.xml',\n 'WAVECAR', 'XDATCAR', 'PROCAR', 'ase-sort.dat',\n 'LOCPOT', 'AECCAR0', 'AECCAR1', 'AECCAR2',\n 'WAVECAR.GTO', 'vasp.out', 'vasp.err']\n for f in files:\n try:\n os.remove(f)\n except OSError:\n pass", "def teardown():\n for filename in files_to_delete:\n delete_file(filename)", "def withdraw(self):\n files = self._file_list\n for f in files:\n remove(str(f))\n self._file_list = []\n self._filename = \"\"" ]
[ "0.74671215", "0.7415675", "0.6979348", "0.6971708", "0.6971565", "0.689763", "0.68770945", "0.6817018", "0.6782567", "0.67809623", "0.6693809", "0.666803", "0.666092", "0.6653605", "0.6635497", "0.66262114", "0.6615348", "0.66098607", "0.6608693", "0.65980065", "0.6585141", "0.6583147", "0.6582249", "0.658113", "0.6569132", "0.65410054", "0.6535095", "0.6534438", "0.6533088", "0.6526979" ]
0.74563277
1
Sets the namespace_name of this ClairpbVulnerability.
def namespace_name(self, namespace_name): self._namespace_name = namespace_name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_namespace(self, namespace: str) -> None:\n self._namespace = namespace", "def namespace(self, namespace: str):\n\n self._namespace = namespace", "def namespace(self, namespace):\n\n self._namespace = namespace", "def namespace(self, namespace):\n\n self._namespace = namespace", "def set_target_namespace(self, namespace):\n # do shit\n self.target_namespace = namespace.strip(\":\")", "def set_ns_prefix(self, ns_for_name: Dict[str, Tuple[str, str]]) -> None:\n self.c_prefix, self.f_prefix = ns_for_name[self.name]", "def set_test_namespace_value(namespace_name=None):\r\n global namespace_value\r\n namespace_value = namespace_name", "def set_ns_prefix(\n self, ns_for_name: Dict[str, Tuple[str, str]], c_ns: str, f_ns: str\n ) -> None:\n self.c_prefix = c_ns\n self.f_prefix = f_ns", "def conventionalize_namespace(self, namespace: str) -> str:\n return _conventionalize(self.options, \"namespace\", namespace)", "def nvmf_namespace_num(self, nvmf_namespace_num):\n\n self._nvmf_namespace_num = nvmf_namespace_num", "def __setattr__(self, name, value):\n if not isinstance(name, str):\n raise ValueError('Namespace label must be a string')\n if name.startswith('_'):\n raise ValueError('Namespace cannot start with an underscore')\n\n if name in self._namespaces:\n raise ValueError('Namespaces cannot be redefined')\n\n self._namespaces[name] = Namespace(name, label=value)", "def set_ns_prefix(self, ns_for_name: Dict[str, Tuple[str, str]]) -> None:\n self.c_prefix, self.f_prefix = ns_for_name[self.class_name]\n self.ret_type.set_ns_prefix(ns_for_name, self.c_prefix, self.f_prefix)\n for param in self.params:\n param.set_ns_prefix(ns_for_name, self.c_prefix, self.f_prefix)", "def setScope(self, fileBasename):\n self.fileBasename = fileBasename\n scopeNamespace = self.defaultNamespacePrefix + fileBasename + '/'\n \n # Annotations go to a different namespace\n annotationScopeNamespace = self.annotationsNamespacePrefix + fileBasename + '/'\n \n self.log.debug('Adding namespace for {0}: {1}'.format(fileBasename, scopeNamespace))\n \n self.namespaces['scope'] = Namespace(scopeNamespace)\n self.annotationNamespaces['scope'] = Namespace(annotationScopeNamespace)\n self.graph.namespace_manager.bind('', self.namespaces['scope'])\n self.annotationGraph.namespace_manager.bind('', self.annotationNamespaces['scope'])", "def setElementNamespace(self, *args):\n return _libsbml.ASTBasePlugin_setElementNamespace(self, *args)", "def setNs(self, ns):\n if ns is None: ns__o = None\n else: ns__o = ns._o\n libxml2mod.xmlSetNs(self._o, ns__o)", "def set_ns_prefix(self, ns_for_name: Dict[str, Tuple[str, str]]) -> None:\n for member in self.members:\n member.set_ns_prefix(ns_for_name)", "def __init__(self, name: str, namespace: str):\n self.name = name\n self.namespace = namespace", "def set_ns_prefix(self, ns_for_name: Dict[str, Tuple[str, str]]) -> None:\n for instance in self.instances:\n instance.set_ns_prefix(ns_for_name)", "def as_namespace_name(name, version):\n return name + ':' + version", "def setElementNamespace(self, *args):\n return _libsbml.SBasePlugin_setElementNamespace(self, *args)", "def set_namespace(self, namespace):\n if not isinstance(namespace, NamespaceModel):\n raise ConfigException(\"given an object that is not \"\n \"a kind of NamespaceModel: %s\" % str(namespace))\n self.namespace_model_instance = namespace", "def test_replace_net_namespace(self):\n pass", "def set_doc_namespace(self, doc, namespace):\n if not self.doc_namespace_set:\n self.doc_namespace_set = True\n if 
validations.validate_doc_namespace(namespace):\n doc.namespace = namespace\n return True\n else:\n raise SPDXValueError('Document::Namespace')\n else:\n raise CardinalityError('Document::Comment')", "def set_name(self, name):\r\n self.stream.set_node_name(self.node, name)", "def namespace_group_num(self, namespace_group_num):\n\n self._namespace_group_num = namespace_group_num", "def setname(self, name):\n self.__name = name", "def replace_namespaced_net_namespace(self, body, name, **kwargs):\n\n all_params = ['body', 'name', 'pretty']\n all_params.append('callback')\n\n params = locals()\n for key, val in iteritems(params['kwargs']):\n if key not in all_params:\n raise TypeError(\n \"Got an unexpected keyword argument '%s'\"\n \" to method replace_namespaced_net_namespace\" % key\n )\n params[key] = val\n del params['kwargs']\n\n # verify the required parameter 'body' is set\n if ('body' not in params) or (params['body'] is None):\n raise ValueError(\"Missing the required parameter `body` when calling `replace_namespaced_net_namespace`\")\n # verify the required parameter 'name' is set\n if ('name' not in params) or (params['name'] is None):\n raise ValueError(\"Missing the required parameter `name` when calling `replace_namespaced_net_namespace`\")\n\n resource_path = '/oapi/v1/netnamespaces/{name}'.replace('{format}', 'json')\n path_params = {}\n if 'name' in params:\n path_params['name'] = params['name']\n\n query_params = {}\n if 'pretty' in params:\n query_params['pretty'] = params['pretty']\n\n header_params = {}\n\n form_params = []\n local_var_files = {}\n\n body_params = None\n if 'body' in params:\n body_params = params['body']\n\n # HTTP header `Accept`\n header_params['Accept'] = self.api_client.\\\n select_header_accept(['application/json', 'application/yaml'])\n if not header_params['Accept']:\n del header_params['Accept']\n\n # HTTP header `Content-Type`\n header_params['Content-Type'] = self.api_client.\\\n select_header_content_type(['*/*'])\n\n # Authentication setting\n auth_settings = []\n\n response = self.api_client.call_api(resource_path, 'PUT',\n path_params,\n query_params,\n header_params,\n body=body_params,\n post_params=form_params,\n files=local_var_files,\n response_type='V1NetNamespace',\n auth_settings=auth_settings,\n callback=params.get('callback'))\n return response", "def setNamespaces(self, *args):\n return _libsbml.SBase_setNamespaces(self, *args)", "def set_qname(self, qname):\n self._qname = qname", "def setOpenIDNamespace(self, openid_ns_uri, implicit):\n if isinstance(openid_ns_uri, bytes):\n openid_ns_uri = str(openid_ns_uri, encoding=\"utf-8\")\n if openid_ns_uri not in self.allowed_openid_namespaces:\n raise InvalidOpenIDNamespace(openid_ns_uri)\n\n self.namespaces.addAlias(openid_ns_uri, NULL_NAMESPACE, implicit)\n self._openid_ns_uri = openid_ns_uri" ]
[ "0.7218368", "0.6848428", "0.6652187", "0.6652187", "0.6394273", "0.61235774", "0.60623956", "0.59424466", "0.5783313", "0.57451713", "0.57072073", "0.56936276", "0.56725025", "0.5653841", "0.5579495", "0.55766636", "0.55388397", "0.5538237", "0.5520687", "0.55004734", "0.5398711", "0.5376789", "0.53735054", "0.5372944", "0.5346059", "0.5335326", "0.5334406", "0.5331527", "0.53207177", "0.5314207" ]
0.7917395
0
Sets the severity of this ClairpbVulnerability.
def severity(self, severity): self._severity = severity
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def severity(self, severity):\n self._severity = severity", "def severity(self, severity):\n if severity is None:\n raise ValueError(\"Invalid value for `severity`, must not be `None`\") # noqa: E501\n\n self._severity = severity", "def severity(self, severity):\n if severity is None:\n raise ValueError(\"Invalid value for `severity`, must not be `None`\") # noqa: E501\n if severity is not None and len(severity) > 200:\n raise ValueError(\"Invalid value for `severity`, length must be less than or equal to `200`\") # noqa: E501\n if severity is not None and len(severity) < 1:\n raise ValueError(\"Invalid value for `severity`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._severity = severity", "def severity(self, severity):\n if severity is None:\n raise ValueError(\"Invalid value for `severity`, must not be `None`\") # noqa: E501\n if severity is not None and len(severity) < 1:\n raise ValueError(\"Invalid value for `severity`, length must be greater than or equal to `1`\") # noqa: E501\n\n self._severity = severity", "def severity_name(self, severity_name):\n\n self._severity_name = severity_name", "def severity(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"severity\")", "def severity(self) -> str:\n return pulumi.get(self, \"severity\")", "def severity(self) -> str:\n return pulumi.get(self, \"severity\")", "def severity(self) -> str:\n return pulumi.get(self, \"severity\")", "def severity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"severity\")", "def severity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"severity\")", "def severity(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"severity\")", "def setSeverityOverride(self, *args):\n return _libsbml.XMLErrorLog_setSeverityOverride(self, *args)", "def severity(self):\n return self._severity", "def severity(self):\n return self._severity", "def changeErrorSeverity(self, *args):\n return _libsbml.XMLErrorLog_changeErrorSeverity(self, *args)", "def severity(self) -> Optional[pulumi.Input['TestIssueSeverity']]:\n return pulumi.get(self, \"severity\")", "def set_verbosity(self,verbosity):\n type_name = type(verbosity).__name__\n if re.search('int',type_name) != None:\n \n # It is an integer, tes bounds\n if verbosity < 4 and verbosity > -1:\n self.verbosity = verbosity\n else:\n raise KINSOL_Exception(\"The variable sent to 'set_verbosity' must be either 0, 1, 2 or 3.\")\n else:\n raise KINSOL_Exception(\"The variable sent to 'set_verbosity' must be an integer.\")", "def severity(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"severity\")", "def severity(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"severity\")", "def severity(self) -> pulumi.Input['EndpointSeverity']:\n return pulumi.get(self, \"severity\")", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def setThreshold(self, v):\n self._set(threshold=v)\n return self", "def normalise_severity(self, severity):\n return \"Info\" if severity == \"Unknown\" else severity", "def set_verbosity(self, verbosity):\n if verbosity == 0:\n self.__logger.setLevel(logging.CRITICAL)\n if verbosity == 1:\n self.__logger.setLevel(logging.ERROR)\n if verbosity == 2:\n self.__logger.setLevel(logging.WARNING)\n if verbosity == 3:\n 
self.__logger.setLevel(logging.INFO)\n if verbosity >= 4:\n self.__logger.setLevel(logging.DEBUG)", "def severity_justification(self, severity_justification):\n\n self._severity_justification = severity_justification", "def setThresholdLevel(self, *args):\n return _libsbml.Input_setThresholdLevel(self, *args)" ]
[ "0.77666795", "0.738541", "0.73160356", "0.7266142", "0.62618464", "0.6216128", "0.59445876", "0.59445876", "0.59445876", "0.5885588", "0.5885588", "0.5885588", "0.58778495", "0.58508825", "0.58508825", "0.562879", "0.5625883", "0.55735195", "0.55235606", "0.55169725", "0.5506324", "0.542118", "0.542118", "0.542118", "0.542118", "0.542118", "0.53888", "0.5325664", "0.5312567", "0.52744985" ]
0.776778
0
Sets the fixed_by of this ClairpbVulnerability.
def fixed_by(self, fixed_by): self._fixed_by = fixed_by
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fixed_amount(self, fixed_amount):\n\n self._fixed_amount = fixed_amount", "def fixed_amount(self, fixed_amount):\n\n self._fixed_amount = fixed_amount", "def issued_by(self, issued_by):\n\n self._issued_by = issued_by", "def mitigated_by(self, mitigated_by):\n\n self._mitigated_by = mitigated_by", "def found_by(self, found_by):\n\n self._found_by = found_by", "def changed_by(self, changed_by):\n\n self._changed_by = changed_by", "def updated_by(self, updated_by):\n\n self._updated_by = updated_by", "def assigned_by_user(self, assigned_by_user):\n\n self._assigned_by_user = assigned_by_user", "def fixed_location(self, fixed_location):\n\n self._fixed_location = fixed_location", "def created_by(self, created_by):\n\n self._created_by = created_by", "def created_by(self, created_by):\n\n self._created_by = created_by", "def created_by(self, created_by):\n\n self._created_by = created_by", "def created_by(self, created_by):\n\n self._created_by = created_by", "def created_by(self, created_by):\n\n self._created_by = created_by", "def created_by(self, created_by):\n\n self._created_by = created_by", "def regulatory_body_approved_by(self, regulatory_body_approved_by):\n\n self._regulatory_body_approved_by = regulatory_body_approved_by", "def last_reviewed_by(self, last_reviewed_by):\n\n self._last_reviewed_by = last_reviewed_by", "def defect_review_requested_by(self, defect_review_requested_by):\n\n self._defect_review_requested_by = defect_review_requested_by", "def _setbeneficiary_customer_59F(self, val):\n self.swift_obj.BeneficiaryCustomer_F = val\n self.swift_obj.BeneficiaryCustomer_F.swiftTag = '59F'", "def allowed_by_team_id(self, allowed_by_team_id):\n\n self._allowed_by_team_id = allowed_by_team_id", "def last_modified_by(self, last_modified_by):\n\n self._last_modified_by = last_modified_by", "def last_modified_by(self, last_modified_by):\n\n self._last_modified_by = last_modified_by", "def updated_by(self, updated_by: \"str\"):\n self._attrs[\"updatedBy\"] = updated_by", "def updated_by(self, updated_by: \"str\"):\n self._attrs[\"updatedBy\"] = updated_by", "def updated_by(self, updated_by: \"str\"):\n self._attrs[\"updatedBy\"] = updated_by", "def review_requested_by(self, review_requested_by):\n\n self._review_requested_by = review_requested_by", "def created_by_id(self, created_by_id):\n\n self._created_by_id = created_by_id", "def created_by_security_user_id(self, created_by_security_user_id):\n\n self._created_by_security_user_id = created_by_security_user_id", "def _determine_uploader_by_changedby_field(self):\n maintainer_string = self.changes.get('Changed-By')\n log.debug(\"Determining user from 'Changed-By:' field: %s\" % maintainer_string)\n maintainer_realname, maintainer_email_address = email.utils.parseaddr(maintainer_string)\n log.debug(\"Changed-By's email address is: %s\", maintainer_email_address)\n self._find_user_by_email_address(maintainer_email_address)", "def amended_by(self, amended_by):\n\n self._amended_by = amended_by" ]
[ "0.6088768", "0.6088768", "0.60354835", "0.594974", "0.56890184", "0.5542011", "0.5290293", "0.5184325", "0.5179588", "0.51558876", "0.51558876", "0.51558876", "0.51558876", "0.51558876", "0.51558876", "0.5038125", "0.4941069", "0.49050403", "0.479892", "0.47826055", "0.47764128", "0.47764128", "0.47333765", "0.47333765", "0.47333765", "0.4714392", "0.47048336", "0.4695162", "0.46468732", "0.46423575" ]
0.8097117
0
Sets the affected_versions of this ClairpbVulnerability.
def affected_versions(self, affected_versions): self._affected_versions = affected_versions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vulnerabilities(self, vulnerabilities):\n\n self._vulnerabilities = vulnerabilities", "def vulnerabilities(self, vulnerabilities):\n\n self._vulnerabilities = vulnerabilities", "def versions(self, versions):\n\n self._versions = versions", "def set_versions(self, consumer, versions):\n for resource_type, resource_version in versions.items():\n self._set_version(consumer, resource_type,\n resource_version)\n\n if versions:\n self._cleanup_removed_versions(consumer, versions)\n else:\n self._handle_no_set_versions(consumer)", "def vulnerable_versions(self):\n raise NotImplementedError()", "def update_versions(consumer, resource_versions):\n _get_cached_tracker().update_versions(consumer, resource_versions)", "def update_versions(self, reference_resolution):\n raise NotImplementedError(\"update_versions is not implemented\")", "def max_affected_version(self, max_affected_version):\n\n self._max_affected_version = max_affected_version", "def pipeline_versions(self, pipeline_versions):\n if (self.local_vars_configuration.client_side_validation and\n pipeline_versions is not None and not isinstance(pipeline_versions, int)):\n raise ValueError(\"Parameter `pipeline_versions` must be an integer\") # noqa: E501\n\n self._pipeline_versions = pipeline_versions", "def test_changeVersions(self):\n self._testVersionChanging(8, 2, 3)", "def set_vectors(self, vecs):\n self.vecs = vecs[:]", "def update_versions(self, new_versions_list):\n to_stop = [version for version in self if version not in new_versions_list]\n for version_id in to_stop:\n del self[version_id]\n\n for version_id in new_versions_list:\n if version_id not in self:\n self[version_id] = VersionManager(\n self.zk_client, self.project_id, self.service_id, version_id,\n self.callback)\n\n self[version_id].ensure_watch()", "def _get_affected_versions(self, rules, versions):\n affected = []\n for ver in versions:\n for rule in rules:\n # If there is a singular rule Ex >=2.1.1\n if len(rule) == 1:\n if self._is_relation_applicable(rule[0]['key'], ver, rule[0]['val']):\n affected.append(ver)\n # If there are 2 rules Ex >=2.1.1 & <2.1.5\n elif len(rule) == 2:\n key0 = rule[0]['key']\n key1 = rule[1]['key']\n first = self._is_relation_applicable(key0, ver, rule[0]['val'])\n second = self._is_relation_applicable(key1, ver, rule[1]['val'])\n if first and second:\n affected.append(ver)\n else:\n if '=' in key0:\n if self._is_relation_applicable(\"=\", ver, rule[0]['val']):\n affected.append(ver)\n elif '=' in key1:\n if self._is_relation_applicable(\"=\", ver, rule[1]['val']):\n affected.append(ver)\n return list(set(affected))", "def affected_portfolios(self, affected_portfolios):\n if self.local_vars_configuration.client_side_validation and affected_portfolios is None: # noqa: E501\n raise ValueError(\"Invalid value for `affected_portfolios`, must not be `None`\") # noqa: E501\n\n self._affected_portfolios = affected_portfolios", "def get_affected_versions(self, rules, versions):\n affected = []\n for ver in versions:\n for rule in rules:\n # If there is a singular rule Ex >=2.1.1\n if len(rule) == 1:\n if self._is_relation_applicable(rule[0]['key'], ver, rule[0]['val']):\n affected.append(ver)\n # If there are 2 rules Ex >=2.1.1 & <2.1.5\n elif len(rule) == 2:\n key0 = rule[0]['key']\n key1 = rule[1]['key']\n first = self._is_relation_applicable(key0, ver, rule[0]['val'])\n second = self._is_relation_applicable(key1, ver, rule[1]['val'])\n if first and second:\n affected.append(ver)\n else:\n if '=' in key0:\n if 
self._is_relation_applicable(\"=\", ver, rule[0]['val']):\n affected.append(ver)\n elif '=' in key1:\n if self._is_relation_applicable(\"=\", ver, rule[1]['val']):\n affected.append(ver)\n return list(set(affected))", "def min_affected_version(self, min_affected_version):\n\n self._min_affected_version = min_affected_version", "def versions(self):\n raise Exception(\"mcapi.Datafile.versions is not implemented\")", "def set_affected_nodes(self, affected_vertices_file):\n self.affected_nodes = pd.read_csv(affected_vertices_file,\n delimiter=self.delimiter,\n dtype='int32',\n header=None,\n engine='python').values\n self.affected_nodes += self.force_offset", "def cpe_vulnerabilities(self, _nvd_cls, _cpe_cls):\n db = get_thread_scoped_session()\n if not _nvd_cls or not _cpe_cls:\n _nvd_cls, _cpe_cls = select_nvd_classes(db)\n cpe_vulnerabilities = db.query(ImageCpe, _cpe_cls).filter(\n ImageCpe.image_id == self.id,\n ImageCpe.image_user_id == self.user_id,\n func.lower(ImageCpe.name) == _cpe_cls.name,\n ImageCpe.version == _cpe_cls.version\n ).options(joinedload(_cpe_cls.parent, innerjoin=True)).all()\n\n # vulndb is similar to nvd cpes, add them here\n cpe_vulnerabilities.extend(\n db.query(ImageCpe, VulnDBCpe).filter(\n ImageCpe.image_id == self.id, ImageCpe.image_user_id == self.user_id,\n func.lower(ImageCpe.name) == VulnDBCpe.name,\n ImageCpe.version == VulnDBCpe.version,\n VulnDBCpe.is_affected.is_(True)\n ).options(joinedload(VulnDBCpe.parent, innerjoin=True)).all())\n\n return cpe_vulnerabilities", "def affected_orders(self, affected_orders):\n if self.local_vars_configuration.client_side_validation and affected_orders is None: # noqa: E501\n raise ValueError(\"Invalid value for `affected_orders`, must not be `None`\") # noqa: E501\n\n self._affected_orders = affected_orders", "def _update_versions_watch(self, new_versions_list):\n if self._stopped:\n return False\n\n persistent_update_versions = retry_children_watch_coroutine(\n self.versions_node, self.update_versions\n )\n main_io_loop = IOLoop.instance()\n main_io_loop.add_callback(persistent_update_versions, new_versions_list)", "def vcpus(self, vcpus):\n self._vcpus = vcpus", "def set_verbosity(self, value):\n for source in self._sources.itervalues():\n source.verbosity = value", "def update_probabilities(self):\n self.probabilities = self.pheromones**self.EXP_PH * self.mcv**self.EXP_MCV", "def setConsistencyChecks(self, *args):\n return _libsbml.SBMLDocument_setConsistencyChecks(self, *args)", "def set_lives(self, new_number_of_lives):\n self.__lives = new_number_of_lives", "def updateVersions(self):\r\n f = open('../versions.pckl', 'wb')\r\n pickle.dump(self.versions, f)\r\n f.close()", "def get_vulnerabilities(self, **kwargs):\n ...", "def setVersion(self, *args):\n\n self._version = '.'.join( [str(arg) for arg in args] )", "def _recalculate_versions(self):\n versions = self._get_local_resource_versions()\n for versions_dict in self._versions_by_consumer.values():\n for res_type, res_version in versions_dict.items():\n versions[res_type].add(res_version)\n self._versions = versions" ]
[ "0.609255", "0.609255", "0.59750617", "0.5626721", "0.54159355", "0.51944435", "0.51533484", "0.5120908", "0.50574327", "0.50420326", "0.50411284", "0.50248915", "0.48979875", "0.4828377", "0.48191965", "0.4787847", "0.46811602", "0.4634885", "0.46162087", "0.4607605", "0.46066567", "0.45801947", "0.44978493", "0.4497593", "0.44804046", "0.44680262", "0.44612357", "0.445444", "0.44413474", "0.44246194" ]
0.8137943
0
Optimizes the distribution of allocations for a set of stock symbols.
def optimize_portfolio(sd=dt.datetime(2008,1,1), ed=dt.datetime(2009,1,1), \ syms=['GOOG','AAPL','GLD','XOM'], gen_plot=False): # Read in adjusted closing prices for given symbols, date range dates = pd.date_range(sd, ed) prices_all = get_data(syms, dates) # automatically adds SPY prices = prices_all[syms] # only portfolio symbols prices_SPY = prices_all['SPY'] # only SPY, for comparison later # find the allocations for the optimal portfolio #1 provide an initial guess for x allocs = np.ones(len(syms))/len(syms) #2 Provide constraints to the optimizer bounds = [(0,1) for i in syms] constraints = ({ 'type': 'eq', 'fun': lambda inputs: 1.0 - np.sum(inputs) }) #3 call the optimizer res = spo.minimize(get_sharpe_ratio, allocs, args=prices, bounds = bounds, constraints=constraints) allocs = res.x # Get daily portfolio value port_val = get_portfolio_value(prices, allocs, 1.0) # Get portfolio statistics cr, adr, sddr, sr = get_portfolio_stats(port_val, daily_rf=0.0, samples_per_year=252) # Compare daily portfolio value with SPY using a normalized plot if gen_plot: # add code to plot here df_temp = pd.concat([port_val, prices_SPY], keys=['Portfolio', 'SPY'], axis=1) plot_normalized_data(df_temp) return allocs, cr, adr, sddr, sr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_best_allocation():\n\n # symbols = ['BRCM', 'TXN', 'IBM', 'HNZ'] \n symbols = ['AAPL', 'GOOG', 'IBM', 'MSFT']\n # ['GOOG','AAPL','GLD','XOM']\n basic_portfolio = BasicPortfolio(symbols, dt.datetime(2014, 1, 1), dt.datetime(2014, 12, 31))\n\n alloc = range(4)\n\n sharpe_max = 0\n alloc_max = alloc[:]\n\n for i in range(11):\n alloc[0] = i * 0.1\n for j in range(11 - i):\n alloc[1] = j * 0.1\n for k in range(11 - i - j):\n alloc[2] = k * 0.1\n alloc[3] = (10 - i - j - k) * 0.1\n\n vol, daily_ret, sharpe, cum_ret = \\\n basic_portfolio.analyze(alloc)\n\n if sharpe > sharpe_max:\n sharpe_max = sharpe\n alloc_max = alloc[:]\n\n print 'Best sharpe ratio is ', sharpe_max\n print 'Best allocation is', alloc_max\n\n ref_symbol = '$SPX'\n\n basic_portfolio.plot_with_reference(alloc_max, ref_symbol, source='local')", "def big_analysis(beta0s=[0.5, 0.8, 1.1, 1.4, 1.7], ks=range(6), betaps=[1.2, 1.5, 2, 3]):", "def test_allocation_strategy_opt_allocs():\n prices = np.array([[10, 10], [11, 15], [12, 5], [13, 10]])\n allocs = AllocationStrategy.opt_allocs(prices, neg_sharpe_ratio)\n np.testing.assert_almost_equal(allocs, [1, 0])", "def prepare_optimization(items,schedule,df_pred):\n itemblocks_to_produce = schedule[itemnames()].sum(0).to_dict()\n blocks_available = schedule.blockid.unique()\n block_order = pd.unique(schedule.blockid)\n forecasted_block_prices = df_pred['forecasted_price'].to_dict()\n actual_block_prices = df_pred['price'].to_dict()\n item_consumptions = items.set_index('item').consumption.to_dict()\n return(itemblocks_to_produce,blocks_available,forecasted_block_prices,\n actual_block_prices,item_consumptions,block_order)", "def knapsack(items, capacity):\r\n pass", "def get_new_allocation(self, day, init=False):\n \"\"\n if init and self.data_train is None:\n # Use uniform allocation\n cur_day_op = self.data.get_op(relative=False)[day, :] # opening prices on |cur_day|\n return util.get_uniform_allocation(self.num_stocks, cur_day_op)\n\n predicted_price_rel = self.predict_price_relatives(day)\n\n # Compute mean price relative of available stocks (x bar at t+1)\n today_op = self.data.get_op(relative=False)[day, :]\n avail_stocks = util.get_avail_stocks(today_op)\n avail_idxs = util.get_available_inds(avail_stocks)\n ppr_avail = predicted_price_rel[avail_idxs] # predicted price relatives of available stocks\n mean_price_rel = np.mean(ppr_avail)\n\n lam = self.compute_lambda(ppr_avail, mean_price_rel, avail_idxs) # lambda at t+1\n\n # limit lambda to avoid numerical problems from acting too aggressively.\n # (referenced from marigold's implementation: https://github.com/Marigold/universal-portfolios)\n lam = min(100000, lam)\n\n # Note: we don't perform simplex project b/c negative values (shorting) is allowed.\n new_b = np.zeros(self.num_stocks)\n for i, _ in enumerate(new_b):\n ppr = predicted_price_rel[i]\n if ppr > 0:\n new_b[i] = self.b[i] + lam * (ppr - mean_price_rel)\n\n # Normalize b so that it sums to 1\n sum_b = np.linalg.norm(new_b, ord=1)\n return (1.0 / sum_b) * new_b", "def recalc_stocks(stocks, feature, args):\r\n \r\n for stock in stocks:\r\n expression = 'stock.' 
+ feature + '_calc(' + args + ')'\r\n exec(expression)\r\n \r\n return", "def get_52_week_high_low_for_stocks(stocks):\n print(\"Fetching stock quotes.\")\n # Build a full list of symbols\n symbols = []\n for key in stocks.keys():\n symbols.append(key)\n\n num_of_batches = int(len(symbols)/BATCH_SIZE) + 1\n\n all_stocks_df = pandas.DataFrame()\n\n #all_stocks_df = pandas.DataFrame()\n\n # Get quotes for all the stocks in batches\n for i in range(0, num_of_batches):\n print(\"Fetching quotes in batch: \" + str(i+1) + \"/\" + str(num_of_batches))\n start = i*BATCH_SIZE\n end = start + BATCH_SIZE\n batch_symbols = symbols[start: end]\n batch_symbols_query = '+'.join(batch_symbols)\n request_url = YAHOO_FINANCE_API + \"?\" + YAHOO_FINANCE_SYMBOL_PARAM + \"=\" + batch_symbols_query +\\\n \"&\" + YAHOO_FINANCE_FORMAT_PARAM + \"=\" + YAHOO_FINANCE_SYMBOL_PARAM + YAHOO_FINANCE_52_ASK_PRICE +\\\n YAHOO_FINANCE_BID_PRICE + YAHOO_FINANCE_52_CLOSE_PRICE + YAHOO_FINANCE_52_WEEK_LOW +\\\n YAHOO_FINANCE_52_WEEK_HIGH + YAHOO_FINANCE_52_LOW_CHANGE +\\\n YAHOO_FINANCE_52_HIGH_CHANGE + YAHOO_FINANCE_DIV_YIELD\n r = requests.get(request_url)\n\n # Read the returned CSV as a pandas table\n # Returned format is NAME,ASK,BID,52-wLow,52-wHigh\n df = pandas.read_table(StringIO(r.text), header=None, sep=',')\n all_stocks_df = all_stocks_df.append(df, ignore_index=True)\n\n # Delay to slow down things\n time.sleep(1)\n\n\n # Assign columns\n print(\"Stock quotes have been fetched. Beginning analysis...\")\n all_stocks_df.columns=['symbol', 'ask', 'bid', 'close', '52w-low', '52w-high', '52w-low-change', '52w-high-change', 'div-iteryield']\n\n # Add the percent change columns\n all_stocks_df['52w-%-low-change'] = all_stocks_df['52w-low-change']/all_stocks_df['52w-low']*100\n all_stocks_df['52w-%-high-change'] = all_stocks_df['52w-high-change'] / all_stocks_df['52w-high'] * 100\n\n # Add the names and sectors\n all_stocks_df['name'] = \"\"\n all_stocks_df['sector'] = \"\"\n for index, row in all_stocks_df.iterrows():\n all_stocks_df.loc[index, 'name'] = stocks[row['symbol']][0]\n all_stocks_df.loc[index, 'sector'] = stocks[row['symbol']][1]\n\n\n # Process the received quotes\n sorted_values = all_stocks_df.sort_values('52w-%-low-change')\n\n # Done\n print(\"Analysis completed.\")\n return sorted_values", "def _optimise(self):\n pass", "def portfolio_allocation(self, data, total_risk):\n total_rating = data[\"rating\"].sum()\n shares = {}\n risk_amt = total_risk\n for _, row in data.iterrows():\n numshares = int(float(row[\"rating\"]) / float(total_rating) * float(risk_amt) / float(row[\"price\"]))\n if numshares > 10:\n multiplier = int(numshares / 10)\n numshares = multiplier * 10\n shares[row[\"symbol\"]] = numshares\n\n risk_amt -= numshares * row[\"price\"]\n # debug\n # for k, v in shares.items():\n # print(\"[*] Ticker: {}, Shares: {}\".format(k, v))\n return shares", "def free(amounts: Dict[str, int]) -> None:\n for name, amount in amounts.items():\n assert 0 <= amount <= Resources.total[name] - Resources.available[name]\n Resources.available[name] += amount", "async def _garbage_collect_sim(self, base: str, trade_size: float, reserved: float):\n\n if not config['trade_garbage_collect']:\n return\n\n base_mult = await self.market.get_base_mult(config['trade_base'], base)\n current_balance = self.balancer.sim_balances[base] * base_mult - reserved\n\n if current_balance >= trade_size:\n return\n\n open_trades_by_time = []\n for pair in self.trades:\n if pair.split('-')[0] == base:\n for trade in 
self.trades[pair]['open']:\n open_trades_by_time.append((trade['open_time'], trade))\n\n open_trades_sorted = [trade_tuple[1] for trade_tuple in sorted(open_trades_by_time, key=lambda x: x[0])]\n\n if open_trades_sorted:\n collect_trade = open_trades_sorted[0]\n await self._sell_sim(collect_trade, 'GARBAGE COLLECT SELL', remit=False)\n self.trades[collect_trade['pair']]['open'].remove(collect_trade)", "def optimize_weights(self, generations):\n for gen in range(generations):\n print(\" Generation: %s\" % gen)\n self._pop_f1 = 0\n self._queue_search(self.population)\n self._queue.join()\n self._scores = {}\n while not self._results.empty():\n (index, f1) = self._results.get()\n self._scores[index] = f1\n self._pop_f1 += f1\n ranks = sorted(range(self.population_size), key=lambda s: (self._scores.get(s)))\n self._report(ranks)\n self._next_generation(ranks)", "def improve_population(self):\r\n for index in range(len(self.district_population)):\r\n district = self.district_population[index]\r\n districtsolution = hillclimber.HillClimber(district, self.cable_cost, self.battery_cost)\r\n self.district_population[index] = districtsolution.run(1000, 80000)\r\n self.cost_populations[index] = district.total_cost(self.battery_cost, self.cable_cost)", "def reallocate(banks):\n distributions = dict()\n cycles = 0\n\n while tuple(banks) not in distributions:\n distributions[tuple(banks)] = cycles\n redistribute(banks, banks.index(max(banks)))\n cycles += 1\n\n cycles_in_loop = cycles - distributions[tuple(banks)]\n return cycles, cycles_in_loop", "def find_allocation_with_min_shering(self):\n for consumption_graph in self.graph_generator.generate_all_consumption_graph():\n self.find_allocation_for_graph(consumption_graph)\n return self.min_sharing_allocation", "def minimize(self):\n pass", "def gen_14BQ_OH():\r\n q_smiles_base = {}\r\n q_smiles_mid = {}\r\n q_smiles_base['1,4-BQ,2-OH'] = '[H]OC1=C([H])C(=O)C([H])=C([H])C1=O'\r\n q_smiles_base['1,4-BQ,Full-OH'] = 'OC1=C(O)C(=O)C(O)=C(O)C1=O'\r\n q_smiles_base['1,4-BQ'] = 'O=C1C=CC(=O)C=C1'\r\n\r\n q_smiles_mid['1,4-BQ'] = 'O=C1C=CC(=O)C=C1'\r\n q_smiles_mid['1,4-BQ,2-OH'] = 'OC1=CC(=O)C=CC1=O'\r\n q_smiles_mid['1,4-BQ,2,3-OH'] = 'OC1=C(O)C(=O)C=CC1=O'\r\n q_smiles_mid['1,4-BQ,2,3,5-OH'] = 'OC1=CC(=O)C(O)=C(O)C1=O'\r\n q_smiles_mid['1,4-BQ,Full-OH'] = 'OC1=C(O)C(=O)C(O)=C(O)C1=O' \r\n\r\n return q_smiles_base, q_smiles_mid", "def analyse_rsi(stocks_data, oversold=25, overbought=85):\n print('\\n--- RSI ANALYSIS ---')\n for stock_symbol, df in stocks_data.items():\n rsi = find_rsi(df)\n min_rsi = oversold\n max_rsi = overbought\n for i in range(len(rsi[1])):\n r = rsi[1][i]\n if r < min_rsi:\n min_rsi = r\n elif r > max_rsi:\n max_rsi = r\n\n mins = []\n maxs = []\n for i in range(len(rsi[1])):\n r = rsi[1][i]\n if r <= min_rsi * 1.2:\n mins.append((rsi[0][i], rsi[1][i]))\n elif r >= max_rsi * 0.95:\n maxs.append((rsi[0][i], rsi[1][i]))\n\n if mins:\n print(stock_symbol)\n [print(m[0], m[1]) for m in mins]\n if maxs:\n print(stock_symbol)\n [print(m[0], m[1]) for m in maxs]", "def entrycalc(self, lows, o):\n price = float(self.price)\n \n #print(nextTrade==price,nextTradeSeller==price)\n for i in range(2, self.entries + 1):\n if len(self.entryprices) > 0:\n avgentryprice = sum(self.entryprices) / len(self.entryprices)\n #if previous entry has been placed and current hasn't and other args are met\n if self.dentry[\"placedOrder\" + str(i - 1) + self.chartnumber] and price < avgentryprice and float(price) < lows[-2] and float(price) < float(o) and not 
self.dentry[\"placedOrder\" + str(i) + self.chartnumber]:\n self.dentry[\"placedOrder\" + str(i) + self.chartnumber] = True\n #add these to dict\n print(\"trade number\",str(i))\n self.dentry[\"tradeEntries\" + str(i) + self.chartnumber] += 1\n #self.totalentries += 1\n \n #I changed these from price to nextTrade\n self.dentry[\"orderPrice\" + str(i) + self.chartnumber] = price\n #self.dentry[\"orderPrice\" + str(i) + chartnumber] = self.nextTrade\n \n #altbuy = int(self.dentry[\"buy\" + str(i) + chartnumber] / price)\n altbuy = int(self.dentry[\"buy\" + str(i) + self.chartnumber] / self.nextTrade)\n \n #self.availablebase -= altbuy * price\n self.availablebase -= altbuy * self.nextTrade\n altbuy -= altbuy * .001\n self.amtofalt += altbuy\n ###HOW LONG TO WE WANT ENTRYPRICES TO BE??\n \n #self.entryprices.append(price)\n self.entryprices.append(self.nextTrade)\n if self.graphics:\n self.graph.buy(self.masterDick[\"currentPrice\" + self.chartnumber], self.masterDick[\"count\" + self.chartnumber], self.chartnumber, i)\n #print(\"Fun:\",self.amtofalt)\n print(\"Buy\" + str(i),self.dentry[\"buy\" + str(i) + self.chartnumber])\n break", "def __call__(self):\n gains = []\n numSim = self.nsim\n \n for n in range(numSim):\n if ENTRY_STRATEGY == 'random':\n gains.append(self.runRandomEntryStrat()) \n elif ENTRY_STRATEGY == 'delayed':\n gains.append(self.runDelayedEntryStrat())\n else:\n raise NotImplemented(\n 'Market entry strategy \"%s\" not implemented' % ENTRY_STRATEGY)\n # Average wasted time\n self.wastedTime = float(self.wastedTime) / numSim\n \n means = np.average(gains)\n medians = np.median(gains)\n low_25 = np.percentile(gains, 25)\n high_25 = np.percentile(gains, 75)\n '''\n freq,bins = np.histogram(gains, bins=25)\n freq = freq.astype('f')/sum(freq.astype('f'))\n \n \n if self.cumulative:\n \n bins_cp = np.array([(bins[a] + bins[a+1])/2.0 for a in range(len(bins)-1)])\n pos_freq = freq[bins_cp>=medians]\n neg_freq = freq[bins_cp<medians]\n pos_freq = np.array([sum(pos_freq[q:]) for q in range(len(pos_freq))])\n neg_freq = np.array([sum(neg_freq[q::-1]) for q in range(len(neg_freq))])\n freq = np.hstack([neg_freq, pos_freq])\n '''\n freq = []\n step = 1\n rng = range(0,51,step)[1:]\n freq += rng\n bins = np.percentile(gains, rng)\n \n rng = range(50,101,step)[1:]\n freq += [100 - a for a in rng]\n bins = np.hstack([bins, np.percentile(gains, rng)])\n freq = np.array(freq)\n \n \"\"\"\n for a in range(0,51,step)[1:]:\n freq.append(a)\n bins.append(np.percentile(gains, a))\n for a in range(50,101,step)[1:]:\n freq.append(100 - a)\n bins.append(np.percentile(gains, a)) \n bins = np.array(bins)\n freq = np.array(freq)\n \"\"\"\n \n X = np.ones(len(bins))*self.daysHeld\n Y = bins\n C = freq\n \n return (means, medians, X, Y, C, self.daysHeld, low_25, high_25, self.wastedTime)", "def processMarketOrders(self):\n try:\n nextRound = self.currentRound+1\n resultsList = []\n master = {}\n self.genMarketStat()\n myMarketStat = self.marketStats[str(self.currentRound)]\n \n # sorted lists of market orders\n master['buyAL'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'max', True, {'value':'AL', 'min':0})\n master['buyEC'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'max', True, {'value':'EC', 'min':0})\n master['buyIA'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'max', True, {'value':'IA', 'min':0})\n master['sellAL'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'min', False, {'value':'AL', 'max':0})\n master['sellEC'] = 
anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'min', False, {'value':'EC', 'max':0})\n master['sellIA'] = anwp.func.funcs.sortDictByChildObjValue(self.marketOrders, 'min', False, {'value':'IA', 'max':0})\n \n for res in ['AL', 'EC', 'IA']:\n for sellOrder in master['sell%s' % res]:\n # min sell order gets first chance to sell its product\n if sellOrder.amountUsed == sellOrder.amount:\n pass # seller has sold all he wants with this order\n else:\n i = 0\n for buyOrder in master['buy%s' % res]:\n # determine price, allow for bidding on price\n try:\n nextBuyOrder = master['buy%s' % res][i+1]\n if nextBuyOrder.max < buyOrder.max and (nextBuyOrder.max+1) >= sellOrder.min:\n price = nextBuyOrder.max + 1\n else:\n price = buyOrder.max\n except IndexError:\n price = buyOrder.max\n # max buy order gets first chance to buy sellers product\n resultsList.append(self.processMarketTransaction(buyOrder, sellOrder, price))\n i += 1\n \n # set the average market prices for this round\n if getattr(myMarketStat, 'volSold%s' % res) > 0:\n setattr(myMarketStat, 'avgSold%s' % res, (getattr(myMarketStat, 'sumSold%s' % res) / \n getattr(myMarketStat, 'volSold%s' % res)))\n \n # clean up market orders for next round\n for orderID in self.marketOrders.keys():\n myMarketOrder = self.marketOrders[orderID]\n myMarketOrder.cleanUp()\n if myMarketOrder.amount == 0:\n resultsList.append('cancel market Order=%s' % orderID)\n self.cancelMarketOrder(orderID)\n \n return str(resultsList)\n except:\n return 'galaxy->processMarketOrders error'", "def minimize_risk(data, returns=None, strict=True, riskfree=None, max_alloc=1,\n short_sell=False, scale=1, ret=False, verbose=True, plotit=False):\n logger = logging.getLogger(__name__)\n if ret:\n weekly = data\n else:\n weekly = get_returns(data, 'simple')\n ret = weekly.mean().values * scale\n cov = weekly.cov().values * scale\n if short_sell:\n return pd.DataFrame()\n n = data.shape[1]\n if riskfree is None:\n aloc = pd.DataFrame(columns=np.append(data.columns, ['Volatility','Return']))\n bounds = [(0,max_alloc)]*n\n else:\n ret = np.append(ret, riskfree)\n cov = np.hstack([ np.vstack([cov,np.zeros([1,n])]), np.zeros([n+1,1]) ])\n aloc = pd.DataFrame(columns=np.append(data.columns, ['risk-free','Volatility','Return']))\n bounds = [(0,max_alloc)]*n + [(0,1)]\n n += 1\n if returns is None:\n returns = np.linspace(min(ret),max(ret), 25, endpoint=True)\n\n from scipy.optimize import minimize\n from basic.useful import progress_bar\n def func(alpha):\n def loss(x):\n return x.dot(cov).dot(x)\n def jac(x):\n return cov.dot(x) * 2\n cons1 = {'type':'eq',\n 'fun': lambda x: np.ones(n).dot(x) - 1,\n 'jac': lambda x: np.ones(n)}\n types = 'eq'\n if not strict: types = 'ineq'\n cons2 = {'type':types,\n 'fun': lambda x: ret.dot(x) - alpha,\n 'jac': lambda x: ret}\n x = minimize(loss, np.ones(n)/n, jac=jac, constraints=[cons1,cons2], bounds=bounds, method='SLSQP')\n aloc.loc[alpha, :] = np.append(np.round(x['x'],4), [np.sqrt(x['fun']), ret.dot(x['x'])] )\n return \"\"\n progress_bar(returns, func, disable=not verbose)\n if plotit:\n import matplotlib.pyplot as plt\n from invest.plot import return_vol\n vol = np.sqrt( np.diag(cov) )\n return_vol(ret, vol, data.columns)\n plt.plot(aloc.Volatility*100, aloc.Return*100, '.-')\n sharpe = aloc.Return/aloc.Volatility\n arg = sharpe.argmax()\n plt.plot(aloc.Volatility[arg]*100, aloc.Return[arg]*100, 'rX', markersize=12)\n print(\"Max Sharpe ratio is {:.2f}\".format(sharpe[arg]))\n return aloc.astype(float)", "def minimize(self):\n raise 
NotImplementedError", "def CodePagesToReachedSize(reached_symbol_names, page_to_symbols):\n reached_symbol_names = set(reached_symbol_names)\n page_to_reached = {}\n for offset in page_to_symbols:\n total_size = sum(x[1] for x in page_to_symbols[offset])\n reached_size = sum(\n size_in_page for (name, size_in_page) in page_to_symbols[offset]\n if name in reached_symbol_names)\n page_to_reached[offset] = {'total': total_size, 'reached': reached_size}\n return page_to_reached", "def analyze(allocs, stackstr, progname, depth, threshold_mallocs, threshold_score):\n if len(allocs) < int(threshold_mallocs):\n # Ignore call sites with too few mallocs\n return []\n analyzed_list = []\n # The set of sizes of allocated objects.\n sizes = set()\n # A histogram of the # of objects allocated of each size.\n size_histogram = defaultdict(int)\n # mallocs - frees (of things allocated in this context)\n actual_footprint = 0\n # max actual_footprint\n peak_footprint = 0\n # index of alloc w/max footprint\n peak_footprint_index = 0\n # sum(mallocs) = the amount of memory used if frees were ignored\n nofree_footprint = 0\n # set of all thread ids used for malloc/free\n tids = set()\n # set of all (currently) allocated objects from this site\n mallocs = set()\n # total number of allocations\n num_allocs = 0\n # was size ever invoked? true iff size was invoked\n size_taken = False\n # true iff all size requests were properly aligned\n all_aligned = True\n # amount of space that would leak if frees were ignored\n would_leak = 0\n for (index, i) in enumerate(allocs):\n # If a size was taken, record this fact and continue.\n if i[\"action\"] == \"S\":\n size_taken = True\n continue\n if len(i[\"stack\"]) < depth:\n continue\n sizes.add(i[\"size\"])\n size_histogram[i[\"size\"]] += 1\n tids.add(i[\"tid\"])\n if i[\"action\"] == \"M\":\n if i[\"reqsize\"] == 0 or i[\"reqsize\"] % 16 != 0:\n # if all_aligned:\n # print(\"FIXME first reqsize not aligned: \" + str(i[\"reqsize\"]))\n all_aligned = False\n num_allocs += 1\n # Compute actual footprint (taking into account mallocs and frees).\n actual_footprint += i[\"size\"]\n if actual_footprint > peak_footprint:\n peak_footprint = actual_footprint\n peak_footprint_index = index\n # Compute total 'no-free' memory footprint (excluding frees) This\n # is how much memory would be consumed if we didn't free anything\n # until the end (as with regions/arenas). 
We use this to compute a\n # \"region score\" later.\n nofree_footprint += i[\"size\"]\n # Record the malloc so we can check it when freed.\n mallocs.add(i[\"address\"])\n elif i[\"action\"] == \"F\":\n if i[\"address\"] in mallocs:\n # Only reclaim memory that we have already allocated\n # (others are frees to other call sites).\n actual_footprint -= i[\"size\"]\n mallocs.remove(i[\"address\"])\n else:\n would_leak += i[\"size\"]\n # print(mallocs)\n # print(str(i[\"address\"]) + \" not found\")\n # Compute region_score (0 is worst, 1 is best - for region replacement).\n region_score = 0\n if nofree_footprint != 0:\n region_score = peak_footprint / nofree_footprint\n if region_score >= float(threshold_score):\n stk = eval(stackstr)\n output = {\n \"stack\": stk,\n \"allocs\": num_allocs,\n \"region_score\": region_score,\n \"threads\": tids,\n \"sizes\": sizes,\n \"size_histogram\": size_histogram,\n \"peak_footprint\": peak_footprint,\n \"nofree_footprint\": nofree_footprint,\n \"potential_leaks\": would_leak,\n \"size_taken\": size_taken,\n \"all_aligned\": all_aligned,\n }\n analyzed_list.append(output)\n return analyzed_list", "def stocks(values, maxSales):\n return 0", "def calc_performance(self):\n for symbol in self.portfolio.assets.keys():\n\n # Total the Performance of all the trades\n start = self.portfolio.trades[symbol].index[0]\n end = self.portfolio.trades[symbol].index[-1]\n trades = len(self.record[symbol])\n profit = self.record[symbol]['profit'].sum()\n loss = self.record[symbol]['loss'].sum()\n # Total or average the trade info for all the trades\n try:\n wins = len(self.record[symbol].groupby('win/loose').groups['w'])\n except (ValueError, KeyError):\n wins = 0\n try:\n losses = len(self.record[symbol].groupby('win/loose').groups['l'])\n except (ValueError, KeyError):\n losses = 0\n try:\n washes = len(self.record[symbol].groupby('win/loose').groups['-'])\n except (ValueError, KeyError):\n washes = 0\n max_drawdown = self.record[symbol]['drawdown'].max()\n average_drawdown = self.record[symbol]['drawdown'].mean()\n max_drawdown_time = self.record[symbol]['drawdown days'].max()\n average_drawdown_time = self.record[symbol]['drawdown days'].mean()\n # Average the risk and market comparisons for all trades\n vol_risk = self.record[symbol]['volatility'].mean()\n beta = self.record[symbol]['beta'].mean()\n lpm_risk = self.record[symbol]['lpm'].mean()\n e_r = self.record[symbol]['expected_return'].mean()\n # Calculate Risk measures\n treynor_ratio = (e_r - self.risk_free_return) / beta\n sharpe_ratio = (e_r - self.risk_free_return) / vol_risk\n # Package up the data for each symbol\n self.performance[symbol] = {\n 'start': start,\n 'end': end,\n 'trades': trades,\n 'wins': wins,\n 'losses': losses,\n 'washes': washes,\n 'profit': profit,\n 'loss': loss,\n 'net_profit': profit - loss,\n 'profit_factor': profit / loss if loss != 0 else 1.0,\n 'percent_profitable': wins / trades if trades != 0 else 0.0,\n 'average_trade_net_profit' : (profit - loss) / trades if trades != 0 else 0.0,\n 'max_drawdown' : max_drawdown,\n 'average_drawdown' : average_drawdown,\n 'max_drawdown_days' : max_drawdown_time,\n 'average_drawdown_days' : average_drawdown_time,\n 'volatility_risk' : vol_risk,\n 'beta' : beta,\n 'lower_partial_moment_risk' : lpm_risk,\n 't_r' : treynor_ratio,\n 's_r' : sharpe_ratio\n }\n\n return self", "def reduceUniverse(self):\r\n self.bondList = list(set([bond for grid in self.parent.gridList for bond in grid.bondList]))#set removes duplicates\r\n self.df = 
self.df.reindex(self.bondList)\r\n self.df = self.df[pandas.notnull(self.df['ISIN'])]\r\n self.rfbonds = list(self.df.loc[self.df['TICKER'].isin(self.riskFreeIssuers)].index)\r\n self.embondsisins = self.df.loc[~self.df['TICKER'].isin(self.riskFreeIssuers), 'ISIN']\r\n self.rfbondsisins = self.df.loc[self.df['TICKER'].isin(self.riskFreeIssuers), 'ISIN']", "def find_max_profit(stock_prices,k):\n\teliminated_indices = set()\n\ttotal_profit = 0\n\n\t\n\tfor i in range(0,k):\n\t\tmax_profit = float('-inf')\n\t\tmin_price = float('inf')\n\t\t\n\t\tfor current_index,current_price in enumerate(stock_prices):\n\t\t\t# This condition takes care of note by making sure that \n\t\t\t# prices are not used in previous transaction.\n\t\t\tif current_index not in eliminated_indices:\n\t\t\t\tcurrent_profit = current_price - min_price\n\n\t\t\t\tif (current_profit > max_profit):\n\t\t\t\t\tbuying_price_index = min_price_index\n\t\t\t\t\tselling_price_index = current_index\n\t\t\t\t\tmax_profit = current_profit\n\n\t\t\t\t#min_price = min(min_price, current_price)\n\t\t\t\tif (current_price < min_price):\n\t\t\t\t\tmin_price = current_price\n\t\t\t\t\tmin_price_index = current_index\n\n\n\t\t# This for loop is to take care of Note\n\t\tfor i in range(buying_price_index,selling_price_index+1):\n\t\t\teliminated_indices.add(i)\n\n\t\ttotal_profit += max_profit\n\t\tprint('buying_price_index :',buying_price_index)\n\t\tprint(\"selling_price_index :\",selling_price_index)\n\n\treturn total_profit" ]
[ "0.6063045", "0.53763187", "0.5290051", "0.52126926", "0.5207097", "0.5145416", "0.50932026", "0.50518227", "0.50404334", "0.49763635", "0.49679303", "0.49524197", "0.48846778", "0.48805937", "0.48631468", "0.4856346", "0.4832151", "0.48185173", "0.48169646", "0.48079696", "0.4805696", "0.47952592", "0.47914487", "0.47780216", "0.47716072", "0.47709826", "0.4770973", "0.47607952", "0.47441167", "0.4699819" ]
0.55457914
1
Given a starting value and prices of stocks in portfolio with allocations return the portfolio value over time.
def get_portfolio_value(prices, allocs, start_val): normed = prices/prices.iloc[0] alloced = np.multiply(allocs, normed) pos_vals = alloced * start_val port_val = pos_vals.sum(axis=1) return port_val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def compute_portvals(start_date, end_date, orders_file, start_val):\n \n #Read order file\n orders = pd.read_csv( orders_file, parse_dates = [0])\n \n #Get symbols making up the portfolio\n stock_symbols = list( set( orders[\"Symbol\"] ) )\n dates = pd.date_range(start_date, end_date)\n \n #Read stock prices\n stock_prices = get_data(stock_symbols, dates)\n \n #Create a portfolio keeping track of positions, \n #_CASH column indicates cash position, _VALUE total portfolio value\n #_LEVERAGE the leverage of portfolio when we allow for short selling\n symbols = stock_symbols[:] #Shallow copy of the list\n symbols.append(\"_CASH\")\n symbols.append(\"_VALUE\")\n symbols.append(\"_LEVERAGE\")\n \n #Index contains only business days, same dates as stock prices\n portfolio = pd.DataFrame(index=stock_prices.index, columns = symbols )\n portfolio.fillna(0) \n portfolio[\"_CASH\"][0] = start_val\n portfolio[\"_VALUE\"][0] = start_val\n \n #Snapshot of a portfolio at any time. To avoid using numerical indexes\n portfolio_snapshot = dict.fromkeys ( symbols, 0 )\n portfolio_snapshot[\"_CASH\"] = start_val\n portfolio[\"_VALUE\"] = start_val\n \n #Now calcualte portfolio day by day\n for date in portfolio.index:\n #Check transactions for the day\n day_orders = orders[ orders[\"Date\"] == date ] \n \n for ord in day_orders.iterrows():\n symbol = ord[1][ \"Symbol\"] \n stock_price = stock_prices[ symbol ][ date ]\n shares = ord[1][\"Shares\" ]\n side = ord[1][\"Order\"]\n \n if side == \"BUY\":\n portfolio_snapshot[ \"_CASH\" ] -= stock_price * shares\n portfolio_snapshot[ symbol ] += shares \n elif side == \"SELL\":\n portfolio_snapshot[ \"_CASH\" ] += stock_price * shares\n portfolio_snapshot[ symbol ] -= shares\n else:\n raise \"Order not recognized.\"\n \n #Compute portfolio value\n portfolio_snapshot[ \"_VALUE\" ] = portfolio_snapshot[ \"_CASH\" ]\n shorts = longs = 0\n for symbol in stock_symbols: \n stock_price = stock_prices[ symbol ][ date ]\n shares = portfolio_snapshot[ symbol ]\n notional = stock_price*shares\n if shares > 0:\n longs += notional\n else:\n shorts += notional\n \n portfolio_snapshot[ \"_VALUE\" ] += notional\n \n #Compute leverage\n leverage = (longs+shorts)/(longs-shorts + portfolio_snapshot[ \"_CASH\" ] )\n portfolio_snapshot[ \"_LEVERAGE\" ] = leverage\n \n #Assert we never achieve a leverage > 2.0\n if leverage > 2:\n raise \"Leverage > 2.0 achieved\"\n \n #Update portfolio from the daily snapshot\n #TODO: Is this causing performance issues?\n for symbol in portfolio.keys():\n portfolio[ symbol ][ date ] = portfolio_snapshot[ symbol ]\n \n return portfolio", "def compute_port_val(allocs, prices):\n # normalized price\n # normalized prices\n normed = prices/prices.iloc[0]\n prices.head()\n alloced = normed * allocs\n\n # position values\n start_val = 1 # included to simplify adding ability to calc as $\n pos_vals = alloced * start_val\n\n # portfolio value\n port_val = pos_vals.sum(axis=1)\n\n return port_val", "def getPortfolioValue(self, start_t, t):\n sum_tmp=0\n for item in self.portfolio.keys():\n if \"DJI_\" in item:\n t_tmp=datetime.strftime(pd.date_range(end=t,periods=1,freq='B')[0],'%Y-%m-%d')\n price=universe.get_price_in_currency(item,t_tmp,'CAD')\n elif 'rf_rate' in item:\n price=universe.get_security(item).get_cc_return(start_t,t) \n else:\n price=universe.get_price_in_currency(item,t,'CAD')\n #price=universe.get_security(item).price[t]\n amount=self.portfolio[item]\n sum_tmp=sum_tmp+price*amount\n \n return sum_tmp", "def compute_portvals(start_date, 
end_date, trades_df, start_val):\n # SETTING UP ORDERS DATAFRAME\n # Read orders file into a dataframe http://pandas.pydata.org/pandas-docs/stable/io.html#io-read-csv-table \n orders = trades_df\n symbols = np.unique(orders['Symbol']).tolist() # List of all the symbols used in orders\n\n # SETTING UP PRICES DATAFRAME\n # Read in adjusted closing prices for given symbols, date range... drop non-trading days... add cash column\n dates = pd.date_range(start_date, end_date)\n prices = get_data(symbols, dates, addSPY=False).dropna()\n prices['cash'] = 1.00\n\n # SETTING UP TRADES DATAFRAME\n # Daily snapshot of portfolio changes (+ = Buy Order, - = Sell Order) with cash adjustments\n trades = pd.DataFrame(0.00, index=prices.index, columns=symbols)\n trades['cash'] = 0.00\n\n for row_index, row in orders.iterrows():\n try:\n if row.Order == 'SELL':\n trades.ix[row.Date,row.Symbol] += (-1 * row.Shares) # Subtract ShareAmount for Sell \n trades.ix[row.Date,'cash'] += (row.Shares * prices.ix[row.Date, row.Symbol]) #adjust cash value for Sell\n elif row.Order == 'BUY':\n trades.ix[row.Date,row.Symbol] += (row.Shares) # Add ShareAmount for Buy\n trades.ix[row.Date,'cash'] += (-1 * row.Shares * prices.ix[row.Date, row.Symbol]) #adjust cash value for Buy\n else:\n print 'ERROR: order type not recognized, looking for BUY or SELL'\n except:\n print 'Unknown Error:'\n\n\n # SETTING UP HOLDINGS DATAFRAME \n # accumulating trades into holdings dataframe, snapshot of shares and cash for given day\n holdings = pd.DataFrame(0.00, index=prices.index, columns=symbols)\n holdings['cash'] = 0.00\n holdings.ix[start_date,'cash'] = start_val # add starting cash value\n previous_row = holdings.iloc[0]\n for row_index, row in holdings.iterrows():\n holdings.ix[row_index] = previous_row + trades.ix[row_index] #previous day's value + trades\n previous_row = row\n\n #SETTING UP VALUES DATAFRAME\n # convert shares into their respective dollar amounts\n values = pd.np.multiply(holdings, prices)\n #DAILY VALUE OF THE PORTFOLIO\n portvals = values.sum(axis=1)\n return portvals", "def test_best_allocation():\n\n # symbols = ['BRCM', 'TXN', 'IBM', 'HNZ'] \n symbols = ['AAPL', 'GOOG', 'IBM', 'MSFT']\n # ['GOOG','AAPL','GLD','XOM']\n basic_portfolio = BasicPortfolio(symbols, dt.datetime(2014, 1, 1), dt.datetime(2014, 12, 31))\n\n alloc = range(4)\n\n sharpe_max = 0\n alloc_max = alloc[:]\n\n for i in range(11):\n alloc[0] = i * 0.1\n for j in range(11 - i):\n alloc[1] = j * 0.1\n for k in range(11 - i - j):\n alloc[2] = k * 0.1\n alloc[3] = (10 - i - j - k) * 0.1\n\n vol, daily_ret, sharpe, cum_ret = \\\n basic_portfolio.analyze(alloc)\n\n if sharpe > sharpe_max:\n sharpe_max = sharpe\n alloc_max = alloc[:]\n\n print 'Best sharpe ratio is ', sharpe_max\n print 'Best allocation is', alloc_max\n\n ref_symbol = '$SPX'\n\n basic_portfolio.plot_with_reference(alloc_max, ref_symbol, source='local')", "def run(self, max_risk=0, min_return=0, num=0, init_holdings=None):\n if not self.dates:\n self.dates = ['2010-01-01', '2012-12-31']\n self.load_data()\n\n num_months = len(self.df_all)\n first_purchase = True\n result = {}\n baseline_result = {}\n self.baseline_values = [0]\n self.update_values = [0]\n months = []\n\n # Define dataframe to save output data \n headers = ['Date', 'Value'] + self.stocks + ['Variance', 'Returns']\n self.opt_results_df = pd.DataFrame(columns=headers)\n row = []\n\n self.price_df = pd.DataFrame(columns=self.stocks)\n\n # Initialize the plot\n plt.ylim(ymax = 1.5*self.budget, ymin = -1.5*self.budget)\n 
plt.xticks(list(range(0, num_months, 2)), \n self.df_baseline.index.strftime('%b')[::2], rotation='vertical')\n plt.locator_params(axis='x', nbins=num_months/2)\n plt.plot(list(range(0, num_months)), [0]*(num_months), \n color='red', label=\"Break-even\", linewidth=0.5)\n\n for i in range(3, num_months):\n\n # Look at just the data up to the current month\n df = self.df_all.iloc[0:i+1,:].copy()\n baseline_df_current = self.df_baseline.iloc[0:i+1,:]\n print(\"\\nDate:\", df.last_valid_index())\n months.append(df.last_valid_index().date()) \n\n if first_purchase:\n budget = self.budget\n initial_budget = self.budget\n baseline_shares = (budget / baseline_df_current.iloc[-1])\n baseline_result = {self.baseline[0]: baseline_shares} \n else:\n # Compute profit of current portfolio\n budget = sum([df.iloc[-1][s]*result['stocks'][s] for s in self.stocks]) \n self.update_values.append(budget - initial_budget)\n\n # Compute profit of fund portfolio\n fund_value = sum([baseline_df_current.iloc[-1][s]*baseline_result[s] \n for s in self.baseline]) \n self.baseline_values.append(fund_value - initial_budget)\n\n self.budget = budget \n\n self.load_data(df=df)\n\n self.price_df.loc[i-2] = list(self.price.values)\n\n # Output for user on command-line and plot\n update_values = np.array(self.update_values, dtype=object)\n baseline_values = np.array(self.baseline_values, dtype=object)\n plt.plot(range(3, i+1), update_values, \n color='blue', label=\"Optimized portfolio\")\n plt.plot(range(3, i+1), baseline_values, \n color='gray', label=\"Fund portfolio\", linewidth=0.5)\n \n if first_purchase:\n plt.legend(loc=\"lower left\")\n plt.title(\"Start: {start}, End: {end}\".format\\\n (start=self.df_all.first_valid_index().date(), \n end=self.df_all.last_valid_index().date()))\n\n plt.savefig(\"portfolio.png\")\n plt.pause(0.05)\n \n # Making solve run\n if self.model_type == 'DQM':\n print(f\"\\nMulti-Period DQM Run...\")\n \n self.build_dqm()\n self.solution['DQM'] = self.solve_dqm()\n result = self.solution['DQM']\n else:\n print(f\"\\nMulti-Period CQM Run...\")\n\n # Set budget to 0 to enforce that portfolio is self-financing \n if self.t_cost and not first_purchase:\n self.budget = 0 \n\n self.solution['CQM'] = self.solve_cqm(max_risk=max_risk, \n min_return=min_return,\n init_holdings=init_holdings)\n result = self.solution['CQM']\n init_holdings = result['stocks']\n\n # Print results to command-line\n value = sum([self.price[s]*result['stocks'][s] for s in self.stocks])\n returns = result['return']\n variance = result['risk'] \n\n row = [months[-1].strftime('%Y-%m-%d'), value] + \\\n [result['stocks'][s] for s in self.stocks] + \\\n [variance, returns] \n self.opt_results_df.loc[i-2] = row \n \n first_purchase = False\n\n print(self.opt_results_df)\n print(f'\\nRun completed.\\n')\n\n plt.savefig(\"portfolio.png\")\n plt.show(block=False)", "def market_value(self, ref_prices, suspensions=None):\n # TODO some securities could not be able to be traded\n if suspensions is None:\n suspensions = []\n \n market_value_float = 0.0\n market_value_frozen = 0.0 # suspended or high/low limit\n for sec in self.holding_securities:\n size = self.get_position(sec).current_size\n # TODO PortfolioManager object should not access price\n price = ref_prices[sec]\n mv_sec = price * size\n if sec in suspensions:\n market_value_frozen += mv_sec\n else:\n market_value_float += mv_sec\n \n return market_value_float, market_value_frozen", "def portfolio_allocation(self, data, total_risk):\n total_rating = data[\"rating\"].sum()\n 
shares = {}\n risk_amt = total_risk\n for _, row in data.iterrows():\n numshares = int(float(row[\"rating\"]) / float(total_rating) * float(risk_amt) / float(row[\"price\"]))\n if numshares > 10:\n multiplier = int(numshares / 10)\n numshares = multiplier * 10\n shares[row[\"symbol\"]] = numshares\n\n risk_amt -= numshares * row[\"price\"]\n # debug\n # for k, v in shares.items():\n # print(\"[*] Ticker: {}, Shares: {}\".format(k, v))\n return shares", "def test_interest_vs_stockprice(self):\n stock_prices = np.array([[5, 10, 20, 40]], dtype=float)\n interest_rate = 2.0 # 200%\n test_case = StockMarket(5, stock_prices, interest_rate)\n test_case.dynamic_programming_bottom_up()\n for portfolio in set(test_case.backtracing_portfolio()):\n self.assertEqual(0, portfolio)", "def momentum(portfolio_item, transaction_volume, cash_allocation):\n from yahooquery import Ticker\n from math import floor\n import talib\n from .TradeHistoryItem import log_trade\n from API.Help import is_increasing, initialize_alpaca\n\n alpaca = initialize_alpaca()\n\n yahoo_ticker = Ticker(str(portfolio_item))\n info = yahoo_ticker.history()\n ma_5 = talib.SMA(info['close'], timeperiod=5)\n ma_20 = talib.SMA(info['close'], timeperiod=20)\n volume = info['volume']\n\n if portfolio_item.shares == 0:\n # if the price goes from below the sma to above, buy\n if ma_5[-1] > (ma_20[-1] * 1.1) and is_increasing(volume, 3):\n print('buying {} shares of {}'.format(transaction_volume, str(portfolio_item)))\n alpaca.submit_order(str(portfolio_item), transaction_volume, 'buy', 'market', 'day')\n portfolio_item.buy(transaction_volume=transaction_volume, cash_allocated=cash_allocation)\n log_trade(portfolio_item=portfolio_item, transaction_volume=transaction_volume, transaction_type=0)\n # if the price goes from above the sma to below, short\n elif ma_5[-1] < (ma_20[-1] * .9) and not is_increasing(volume, 3) and portfolio_item.shares == 0:\n transaction_volume = floor(cash_allocation / (portfolio_item.ticker.price_now * 1.1))\n print('shorting {} shares of {}'.format(transaction_volume, str(portfolio_item)))\n alpaca.submit_order(str(portfolio_item), transaction_volume, 'sell', 'market', 'day')\n portfolio_item.short(transaction_volume=transaction_volume, cash_allocated=cash_allocation)\n log_trade(portfolio_item=portfolio_item, transaction_volume=transaction_volume, transaction_type=3)", "def cumulative_returns(shares_allocation, capital, test_data):\n\n # list of DataFrames of cumulative returns for each stock\n daily_returns = []\n\n # iterates over every stock in the portfolio\n for stock in shares_allocation.index:\n\n # multiples shares by share prices in the validation dataset\n daily_returns.append(shares_allocation.loc[stock].values * test_data[stock])\n\n # concatenates every DataFrame in the above list to a single DataFrame\n daily_returns_df = pd.concat(daily_returns, axis=1).reset_index()\n\n # sets the index as the date\n daily_returns_df.set_index(\"Day\", inplace=True)\n\n # adds the cumulative returns for every stock\n cumulative_daily_returns = daily_returns_df.sum(axis=1)\n\n # returns the cumulative daily returns of the portfolio\n return cumulative_daily_returns", "def get_portfolio_prices(stocks: list, funds: list, etfs: list, start_date: str, end_date=today) -> pd.DataFrame:\r\n data_frames_stocks = get_assets_data_frames(\r\n stocks, inv.get_stock_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n data_frames_funds = get_assets_data_frames(\r\n funds, inv.get_fund_historical_data, 
'brazil', start_date=start_date, end_date=end_date)\r\n data_frames_etfs = get_assets_data_frames(\r\n etfs, inv.get_etf_historical_data, 'brazil', start_date=start_date, end_date=end_date)\r\n\r\n data_frames = [*data_frames_stocks, *data_frames_funds, *data_frames_etfs]\r\n\r\n assets = [*stocks, *funds, *etfs]\r\n\r\n portfolio_prices = build_multi_index_data_frame(\r\n data_frames, assets, ['Close', 'Open', 'High', 'Low'])\r\n\r\n return portfolio_prices", "def optimize_portfolio(sd=dt.datetime(2008,1,1), ed=dt.datetime(2009,1,1), \\\n syms=['GOOG','AAPL','GLD','XOM'], gen_plot=False):\n\n # Read in adjusted closing prices for given symbols, date range\n dates = pd.date_range(sd, ed)\n prices_all = get_data(syms, dates) # automatically adds SPY\n prices = prices_all[syms] # only portfolio symbols\n prices_SPY = prices_all['SPY'] # only SPY, for comparison later\n\n\t# find the allocations for the optimal portfolio\n #1 provide an initial guess for x\n allocs = np.ones(len(syms))/len(syms)\n #2 Provide constraints to the optimizer\n bounds = [(0,1) for i in syms]\n constraints = ({ 'type': 'eq', 'fun': lambda inputs: 1.0 - np.sum(inputs) })\n #3 call the optimizer\n res = spo.minimize(get_sharpe_ratio, allocs, \n \t\t\t\t\targs=prices, \n \t\t\t\t\tbounds = bounds,\n \t\t\t\t\tconstraints=constraints)\n allocs = res.x\n \n # Get daily portfolio value\n port_val = get_portfolio_value(prices, allocs, 1.0)\n \n # Get portfolio statistics\n cr, adr, sddr, sr = get_portfolio_stats(port_val, \n \t\t\t\t\t\t\t\t\t\tdaily_rf=0.0, \n \t\t\t\t\t\t\t\t\t\tsamples_per_year=252)\n \n # Compare daily portfolio value with SPY using a normalized plot\n if gen_plot:\n # add code to plot here\n df_temp = pd.concat([port_val, prices_SPY], keys=['Portfolio', 'SPY'], axis=1)\n plot_normalized_data(df_temp)\n\n return allocs, cr, adr, sddr, sr", "def portfolio_performance(returns,weights):\r\n print('Calculating Portfolio Performance')\r\n # returns=target_asset_port_data_attributes['component_returns']\r\n # weights =target_asset_port_data_attributes['effective_weights']\r\n\r\n component_returns= returns\r\n compnent_weights = pd.DataFrame(data=np.nan,index= component_returns.index,columns=component_returns.columns)\r\n compnent_weights.loc[weights.index,:] = weights\r\n\r\n portfolio_dates = component_returns.index\r\n components = component_returns.columns\r\n\r\n # pre-allocate\r\n BoP_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n EoP_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n PnL_df = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=components)\r\n portfolio_BoP = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio BoP'])\r\n portfolio_EoP = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio EoP'])\r\n portfolio_PnL = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Portfolio PnL'])\r\n \r\n portfolio_index = pd.DataFrame(data=np.nan,index=portfolio_dates,columns=['Index'])\r\n previous_index_value = np.int64(1)\r\n\r\n pre_date = portfolio_dates[0]\r\n # set BoP to start weights\r\n for date,row in component_returns.iterrows():\r\n # print(date)\r\n # 1st date\r\n if date == portfolio_dates[0]:\r\n BoP_df.loc[date] = compnent_weights.iloc[0,:]\r\n EoP_df.loc[date] = BoP_df.loc[date] * (1+component_returns.loc[date])\r\n PnL_df.loc[date] = EoP_df.loc[date].subtract(BoP_df.loc[date])\r\n\r\n portfolio_BoP.loc[date] = BoP_df.loc[date].sum()\r\n portfolio_EoP.loc[date] = EoP_df.loc[date].sum()\r\n 
portfolio_PnL.loc[date] = PnL_df.loc[date].sum()\r\n\r\n portfolio_index.loc[date] = np.nansum([previous_index_value,portfolio_PnL.loc[date].values])\r\n previous_index_value = portfolio_index.loc[date]\r\n pre_date = date\r\n\r\n # after first date\r\n else:\r\n BoP_df.loc[date] = EoP_df.loc[pre_date]\r\n # weights override\r\n if date in compnent_weights.index:\r\n none_NaN_index = ~compnent_weights.loc[date].isnull()\r\n if not compnent_weights.loc[date][none_NaN_index].empty:\r\n tmp_sum = BoP_df.loc[date].sum()\r\n BoP_df.loc[date][none_NaN_index.values] = (compnent_weights.loc[date][none_NaN_index.values].values)*tmp_sum\r\n\r\n \r\n EoP_df.loc[date] = BoP_df.loc[date] * (1+component_returns.loc[date])\r\n PnL_df.loc[date] = EoP_df.loc[date].subtract(BoP_df.loc[date])\r\n\r\n portfolio_BoP.loc[date] = BoP_df.loc[date].sum()\r\n portfolio_EoP.loc[date] = EoP_df.loc[date].sum()\r\n portfolio_PnL.loc[date] = PnL_df.loc[date].sum()\r\n \r\n portfolio_index.loc[date] = np.nansum([previous_index_value,portfolio_PnL.loc[date].values])\r\n previous_index_value = portfolio_index.loc[date]\r\n pre_date = date\r\n\r\n\r\n portfolio_returns = portfolio_index.pct_change(1) \r\n portfolio_returns.columns = ['Returns']\r\n\r\n portfolio_index\r\n perf = portfolio_index.calc_stats()\r\n \r\n output = pd.Series(data = [perf,PnL_df,portfolio_index,portfolio_BoP,portfolio_EoP,BoP_df], index=['Portfolio Perf','Component PnL','portfolio_index','portfolio_BoP','portfolio_EoP','BoP_df'])\r\n return output", "def returns_to_prices(returns: pd.Series, start_price: float) -> pd.Series:\n return returns.add(1).cumprod().mul(start_price)", "def getStock(symbol, start, end):\n df = data.get_data_yahoo(symbol, start, end)\n\n df.columns.values[-1] = 'AdjClose'\n df.columns = df.columns + '_' + symbol\n df['Return_%s' % symbol] = df['AdjClose_%s' % symbol].pct_change()\n\n return df", "def get_index_portfolio_value_data(game_id: int, symbol: str, start_time: float = None,\n end_time: float = None) -> pd.DataFrame:\n start_time, end_time = get_time_defaults(game_id, start_time, end_time)\n base_value = get_index_reference(game_id, symbol)\n\n with engine.connect() as conn:\n df = pd.read_sql(\"\"\"\n SELECT timestamp, `value` FROM indexes\n WHERE symbol = %s AND timestamp >= %s AND timestamp <= %s;\"\"\", conn, params=[symbol, start_time, end_time])\n index_info = query_to_dict(\"SELECT * FROM index_metadata WHERE symbol = %s\", symbol)[0]\n\n # normalizes index to the same starting scale as the user\n df[\"value\"] = STARTING_VIRTUAL_CASH * df[\"value\"] / base_value\n df[\"username\"] = index_info[\"name\"]\n\n # When a game kicks off, it will generally be that case that there won't be an index data point at exactly that\n # time. 
We solve this here, create a synthetic \"anchor\" data point that starts at the same time at the game\n trade_start = make_index_start_time(start_time)\n return pd.concat([pd.DataFrame(dict(username=index_info[\"name\"], timestamp=[trade_start],\n value=[STARTING_VIRTUAL_CASH])), df])", "def portfolio():\n #Query transactions by user id\n trans = Transactions.query.filter_by(owner=session['user_id']).all()\n \n #Create list of comanies user owns stock in\n companies = []\n for t in trans:\n if t.symbol not in companies:\n companies.append(t.symbol)\n\n #Create list of current stock dictionaries and total their values\n total = 0\n stocks = []\n for company in companies:\n trans = Transactions.query.filter_by(owner=session['user_id'], symbol=company).all()\n stock = {}\n stock['shares'] = 0\n for t in trans:\n stock['shares'] += t.shares\n if stock['shares'] > 0:\n stock['symbol'] = company\n stock['name'] = lookup(company)['name']\n stock['price'] = lookup(company)['price']\n stock['total'] = stock['shares'] * stock['price']\n stock['price'] = usd(stock['price'])\n stock['total'] = usd(stock['total'])\n total += float(stock['total'][1:].replace(',', ''))\n stocks.append(stock)\n\n #Set user cash and total values\n value = {}\n value['cash'] = usd(Users.query.filter_by(id=session['user_id']).first().cash)\n value['total'] = usd(total + float(value['cash'][1:].replace(',', '')))\n\n #Add values to list\n stocks.append(value)\n\n #Return list of dictionaries\n return stocks", "def getStock(symbol, start, end):\n df = pd.io.data.get_data_yahoo(symbol, start, end)\n\n df.columns.values[-1] = 'AdjClose'\n df.columns = df.columns + '_' + symbol\n df['Return_%s' % symbol] = df['AdjClose_%s' % symbol].pct_change()\n\n return df", "def get_new_allocation(self, day, init=False):\n \"\"\n if init and self.data_train is None:\n # Use uniform allocation\n cur_day_op = self.data.get_op(relative=False)[day, :] # opening prices on |cur_day|\n return util.get_uniform_allocation(self.num_stocks, cur_day_op)\n\n predicted_price_rel = self.predict_price_relatives(day)\n\n # Compute mean price relative of available stocks (x bar at t+1)\n today_op = self.data.get_op(relative=False)[day, :]\n avail_stocks = util.get_avail_stocks(today_op)\n avail_idxs = util.get_available_inds(avail_stocks)\n ppr_avail = predicted_price_rel[avail_idxs] # predicted price relatives of available stocks\n mean_price_rel = np.mean(ppr_avail)\n\n lam = self.compute_lambda(ppr_avail, mean_price_rel, avail_idxs) # lambda at t+1\n\n # limit lambda to avoid numerical problems from acting too aggressively.\n # (referenced from marigold's implementation: https://github.com/Marigold/universal-portfolios)\n lam = min(100000, lam)\n\n # Note: we don't perform simplex project b/c negative values (shorting) is allowed.\n new_b = np.zeros(self.num_stocks)\n for i, _ in enumerate(new_b):\n ppr = predicted_price_rel[i]\n if ppr > 0:\n new_b[i] = self.b[i] + lam * (ppr - mean_price_rel)\n\n # Normalize b so that it sums to 1\n sum_b = np.linalg.norm(new_b, ord=1)\n return (1.0 / sum_b) * new_b", "def __init__(self, start_date=\"2017-01-01\", end_date=datetime.datetime.now().strftime(\"%Y-%m-%d\"), asset_list=[]):\n\n self.start_date = start_date\n self.end_date = end_date\n self.asset_list = asset_list\n self.portfolio = pd.DataFrame()\n self.benchmark = san.get(\"ohlcv/bitcoin\", from_date=start_date,\n to_date=end_date).closePriceUsd.pct_change()\n\n for portfolio_asset in asset_list:\n self.portfolio[portfolio_asset] = san.get(\"ohlcv/\" + 
portfolio_asset,\n from_date=start_date,\n to_date=end_date).closePriceUsd.pct_change()\n self.portfolio = self.portfolio.replace([np.inf, -np.inf], 0)\n self.metrics = dict()", "def test_low_stockprice_high_interest(self):\n stock_prices = np.array([[5, 4, 4, 2],\n [5, 3, 3, 3],\n [5, 4, 2, 2],\n [5, 3, 3, 1]], dtype=float)\n interest_rate = 2.0 # 200%\n test_case = StockMarket(5, stock_prices, interest_rate)\n test_case.dynamic_programming_bottom_up()\n for portfolio in set(test_case.backtracing_portfolio()):\n self.assertEqual(0, portfolio)", "def evaluate_cur_stocks(self):\n today = datetime.today()\n close_val = PRICE_DF.iloc[PRICE_DF.index.get_loc(today, method=\"ffill\")]\n close_val = close_val[self.cur_stocks.index]\n close_val = pd.DataFrame({\"PRICE_CURRENT\" : close_val.values}, index=self.cur_stocks.index)\n evaluated_stocks = pd.merge(self.cur_stocks, close_val, left_index=True, right_index=True)\n evaluated_stocks[\"VOLUME_CURRENT\"] = evaluated_stocks[\"AMOUNT\"] * evaluated_stocks[\"PRICE_CURRENT\"]\n evaluated_stocks[\"RETURN\"] = (evaluated_stocks[\"VOLUME_CURRENT\"] / evaluated_stocks[\"VOLUME_PURCHASE\"]) - 1\n return evaluated_stocks", "def __calculate_portfolio_returns(self):\n\n p_bar = tqdm(range(1), desc=\" Calculating returns\", leave=False)\n\n trade_data = self.historical_trade_data\n\n # Helper functions to calculate cash inflow and outflow\n def f_min(x):\n return x.apply(lambda x: min(x, 0))\n\n def f_max(x):\n return x.apply(lambda x: max(x, 0))\n\n # Calculate cash inflow and outflow\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period cash inflow\"], self.tickers_list + [\"Total\"]]\n )\n ] = -1 * trade_data[\"Investment delta\"][:].apply(lambda x: f_min(x), axis=0)\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period cash outflow\"], self.tickers_list + [\"Total\"]]\n )\n ] = trade_data[\"Investment delta\"][:].apply(lambda x: f_max(x), axis=1)\n\n # Calculate period return\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period absolute return\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"End Value\"] + trade_data[\"Period cash inflow\"]) - (\n trade_data[\"End Value\"].shift(1).fillna(0)\n + trade_data[\"Period cash outflow\"]\n )\n\n trade_data[\n pd.MultiIndex.from_product(\n [[\"Period percentage return\"], self.tickers_list + [\"Total\"]]\n )\n ] = (trade_data[\"End Value\"] + trade_data[\"Period cash inflow\"]) / (\n trade_data[\"End Value\"].shift(1).fillna(0)\n + trade_data[\"Period cash outflow\"]\n ) - 1\n\n trade_data[\"Period percentage return\"].fillna(0, inplace=True)\n\n self.historical_trade_data = trade_data\n\n self.portfolio_returns = self.historical_trade_data[\"Period percentage return\"][\n \"Total\"\n ]\n\n p_bar.n += 1\n p_bar.refresh()", "def hedge_portfolio(context, data):\r\n factors = get_alphas_and_betas(context, data)\r\n beta_exposure = 0.0\r\n count = 0\r\n for asset in context.portfolio.positions:\r\n if asset in factors and asset != context.index:\r\n if not np.isnan(factors[asset].beta):\r\n beta_exposure += factors[asset].beta\r\n count += 1\r\n beta_hedge = -1.0 * beta_exposure / count\r\n dollar_amount = context.portfolio.portfolio_value * beta_hedge\r\n record(beta_hedge=beta_hedge)\r\n if not np.isnan(dollar_amount):\r\n order_target_value(context.index, dollar_amount)", "def __init__(\n self,\n portfolio,\n market=None,\n commission_min=5.00,\n commission_pct=0.0,\n buy_percent=1.0,\n sell_percent=1.0,\n pm_threshold=0.0,\n pm_order=1.0,\n risk_free_return=1.0,\n name=None\n ):\n\n # 
Assumptions\n self.name = name if name else portfolio.name\n self.commission_min = commission_min\n self.commission_pct = commission_pct\n self.buy_percent = buy_percent\n self.sell_percent = sell_percent\n self.pm_threshold = pm_threshold\n self.pm_order = pm_order\n self.risk_free_return = risk_free_return\n self.performance = {}\n\n # Inputs\n self.portfolio = portfolio\n self.market = copy.deepcopy(market) if market else Asset(np.ones(len(self.portfolio.dates)))\n\n # Trading states\n self.long_open = {symbol:False for symbol in portfolio.assets.keys()}\n self.short_open = {symbol:False for symbol in portfolio.assets.keys()}\n\n # Keep track of intermidiate results for performance\n self.trade_data = []\n recordings = [\n 'buy price', 'buy shares', 'buy fees', 'buy date',\n 'sell price', 'sell shares', 'sell fees', 'sell date',\n 'gain', 'profit', 'loss', 'return', 'win/loose',\n 'min balance', 'min date', 'max balance', 'max date',\n 'drawdown', 'drawdown days',\n 'volatility', 'expected_return', 'beta', 'lpm', 'hpm',\n 'max', 'mean', 'min'\n ]\n self.record = {symbol:pd.DataFrame(columns=recordings) for symbol in portfolio.assets.keys()}\n self.max = {symbol:[portfolio.assets[symbol].c.iloc[0], None] for symbol in portfolio.assets.keys()}\n self.min = {symbol:[999999999999999, None] for symbol in portfolio.assets.keys()}\n self.drawdown = {symbol:[999999999999999, None] for symbol in portfolio.assets.keys()}", "def rebalance(self, date):\n eod_values = self.df.shift(1).loc[date, 'values'].mul(1 + self.tc.instrument_returns.loc[date, 'daily'])\n eod_portfolio_value = sum(eod_values.values)\n\n previous_values = self.df.loc[date, 'values'].copy()\n position_value = self.target_weights.mul(eod_portfolio_value)\n trading_cost = abs(eod_values.div(eod_portfolio_value) - self.target_weights) * eod_portfolio_value * \\\n self.tc.commission\n current_values = position_value - trading_cost\n self.df.loc[date, 'values'] = current_values.values\n future_values = self.tc.instrument_returns.loc[date:, 'cumulative'].div(\n self.tc.instrument_returns.loc[date, 'cumulative']).mul(current_values, axis=1)\n self.df.loc[date:, 'values'] = future_values.values\n trade = pd.Series(current_values - previous_values)\n # Once we have calculated the end-of-day value of the portfolio, we set the allocation by looking at the\n # dollars invested in each ETF\n self.df.loc[date:, 'allocations'] = future_values.div(future_values.sum(axis=1), axis=0).values\n\n return trade", "def before_trading_start(context, data):\n factors = pipeline_output('ff_example')\n\n # get the data we're going to use\n returns = factors['returns']\n mkt_cap = factors.sort_values(['market_cap'], ascending=True)\n be_me = factors.sort_values(['be_me'], ascending=True)\n\n # to compose the six portfolios, split our universe into portions\n half = int(len(mkt_cap)*0.5)\n small_caps = mkt_cap[:half]\n big_caps = mkt_cap[half:]\n \n thirty = int(len(be_me)*0.3)\n seventy = int(len(be_me)*0.7)\n growth = be_me[:thirty]\n neutral = be_me[thirty:seventy]\n value = be_me[seventy:]\n\n # now use the portions to construct the portfolios.\n # note: these portfolios are just lists (indices) of equities\n small_value = small_caps.index.intersection(value.index)\n small_neutral = small_caps.index.intersection(neutral.index)\n small_growth = small_caps.index.intersection(growth.index)\n \n big_value = big_caps.index.intersection(value.index)\n big_neutral = big_caps.index.intersection(neutral.index)\n big_growth = 
big_caps.index.intersection(growth.index)\n\n # take the mean to get the portfolio return, assuming uniform\n # allocation to its constituent equities.\n sv = returns[small_value].mean()\n sn = returns[small_neutral].mean()\n sg = returns[small_growth].mean()\n \n bv = returns[big_value].mean()\n bn = returns[big_neutral].mean()\n bg = returns[big_growth].mean()\n\n # computing SMB\n context.smb = (sv + sn + sg)/3 - (bv + bn + bg)/3\n\n # computing HML\n context.hml = (sv + bv)/2 - (sg + bg)/2", "def declare_new_budget(date, exp_data):\n\n exp_list = exp_data[env.EXPENSE_DATA_KEY]\n local_budget = {}\n month_total = util.get_float_input(\n f\"Please input your total for the month ending {date}: \", force_pos=True)\n budg_remaining = month_total\n\n for i, exp in enumerate(exp_list):\n if i == len(exp_list) - 1:\n print(\"I got the last one for you :) MATH!\")\n budg_amnt = budg_remaining\n budg_remaining = 0\n\n elif budg_remaining == 0: # elif skips this condition if budget remaining is set above\n budg_amnt = 0\n local_budget[env.BUDGET_TOTAL_KEY] = month_total\n else:\n prompt = f\"Enter your budget for: [{exp}] - Total Budget Re. ${budg_remaining} - Exp's Re. [{len(exp_list) - i - 1}]: \"\n budg_amnt = prompt_for_budget_amnt(\n prompt, budg_remaining, exp_data)\n local_budget.update({exp: budg_amnt})\n budg_remaining = round(month_total - sum_budget(local_budget), 2)\n print(local_budget)\n return local_budget", "def size_portfolio_replic(df1, df2, df3, step3=None):\r\n # merge crsp & delisted stock returns\r\n _crsp = pd.merge(df1, df2, how='outer', on=['date', 'permno'])\r\n\r\n # use the two returns together\r\n _crsp = _crsp[~(_crsp['ret'].isna() & _crsp['dlret'].isna())]\r\n _crsp.loc[_crsp['ret'].isna(), 'ret'] = 0\r\n _crsp.loc[_crsp['dlret'].isna(), 'dlret'] = 0\r\n _crsp['ret'] = (_crsp['ret'] + 1) * (_crsp['dlret'] + 1) - 1\r\n\r\n _crsp['prc'] = abs(_crsp['prc']) # use positive price\r\n _crsp['me'] = _crsp['prc'] * _crsp['shrout'] / 1000 # shares in thousands, market value in millions\r\n _crsp['year'] = _crsp['date'].dt.year\r\n _crsp['month'] = _crsp['date'].dt.month\r\n\r\n # sort the df first by stock code then by date\r\n _crsp = _crsp.sort_values(by=['permno', 'date'], ascending=True)\r\n\r\n # get the market value for the previous month, for the value-weighted rets calculations later\r\n _crsp['lag_me'] = _crsp['me'].shift(1)\r\n _crsp.loc[_crsp['permno'].shift(1) != _crsp['permno'], 'lag_me'] = np.nan\r\n\r\n # get the fiscal year: if month<=6 then fyear = year - 1\r\n _crsp['fyear'] = _crsp['year']\r\n _crsp.loc[_crsp['month'] <= 6, 'fyear'] = _crsp['fyear'] - 1\r\n\r\n # at the end of each June, use me as indicator to construct new portfolio\r\n _construct = _crsp[_crsp['month'] == 6].copy()\r\n _construct['fyear'] = _construct['fyear'] + 1 # me at June is used for the following fyear\r\n _construct = _construct[['fyear', 'me', 'permno']]\r\n\r\n # merge the me indicator with the original dataset\r\n # now there are 2 me related values:\r\n # lag_me for vw ret calculation, and me_ind for decile classification\r\n _crsp = pd.merge(_crsp, _construct, how='left', on=['fyear', 'permno'])\r\n _crsp = _crsp[_crsp['me_y'].notna()]\r\n _crsp = _crsp.drop(columns='me_x')\r\n _crsp.rename(columns={'me_y': 'me_ind'}, inplace=True)\r\n\r\n # obtain the breakpoints, use nyse stocks and me at end of June (start of July)\r\n _nyse = _crsp.loc[(_crsp['exchcd'] == 1) & (_crsp['month'] == 7)]\r\n\r\n # use quantile function to get breakpoints for each time period\r\n _indicator = 
_nyse.groupby(['fyear'])['me_ind'].quantile(0.1).to_frame()\r\n _indicator.reset_index(drop=False, inplace=True)\r\n _indicator.rename(columns={'me_ind': 'd'}, inplace=True)\r\n for i in range(2, 10):\r\n _dec_insert = _nyse.groupby(['fyear'])['me_ind'].quantile(0.1 * i)\r\n _dec_insert.reset_index(drop=True, inplace=True)\r\n _indicator.insert(_indicator.shape[1], 'd' * i, _dec_insert)\r\n\r\n # merge the breakpoints to the original dataset\r\n _crsp = pd.merge(_crsp, _indicator, how='left', on=['fyear'])\r\n\r\n # obtain the decile for each observation\r\n _crsp.loc[(_crsp['me_ind'] <= _crsp['d']), 'decile'] = 1 # dec1\r\n _crsp.loc[(_crsp['me_ind'] > _crsp['d' * 9]), 'decile'] = 10 # dec10\r\n for i in range(1, 9):\r\n _crsp.loc[(_crsp['me_ind'] > _crsp['d' * i])\r\n & (_crsp['me_ind'] <= _crsp['d' * (i + 1)]), 'decile'] = i + 1 # dec2-9\r\n\r\n # if step3 is true, return crsp for HML & SMB calculation\r\n if step3:\r\n return _crsp\r\n\r\n # obtain the value-weighted rets for each month\r\n _crsp['ret*lag_me'] = _crsp['ret'] * _crsp['lag_me']\r\n _crsp_vw = (_crsp.groupby(['year', 'month', 'decile'])['ret*lag_me'].sum() /\r\n _crsp.groupby(['year', 'month', 'decile'])['lag_me'].sum()).to_frame()\r\n _crsp_vw.reset_index(drop=False, inplace=True)\r\n _crsp_vw.rename(columns={'decile': 'port', 0: 'Size_Ret'}, inplace=True)\r\n\r\n # restrict time from Jan1973 to Dec2020\r\n _crsp_vw = _crsp_vw.loc[(_crsp_vw['year'] <= 2020) & (_crsp_vw['year'] >= 1973)]\r\n _crsp_vw.reset_index(drop=True, inplace=True)\r\n\r\n _ff = df3.copy()\r\n _rf = _ff[['year', 'month', 'RF']] # get risk-free from ff\r\n _ff['wml_size'] = _ff['ME01'] - _ff['ME10'] # get long-short portfolio by dec1 minus dec10 in ff\r\n _crsp_vw = pd.merge(_crsp_vw, _rf, on=['year', 'month'], how='inner')\r\n _crsp_vw['exret'] = _crsp_vw['Size_Ret'] - _crsp_vw['RF'] # get excess returns\r\n\r\n # get long-short portfolio by dec1 minus dec10 in replication\r\n _ls = pd.merge(_crsp_vw[_crsp_vw['port'] == 1], _crsp_vw[_crsp_vw['port'] == 10], on=['year', 'month'], how='inner')\r\n _ls['wml'] = _ls['Size_Ret_x'] - _ls['Size_Ret_y'] # dec1 - dec10\r\n _ls = _ls[['year', 'month', 'wml']]\r\n\r\n # annualized, in percentage\r\n _ls_mean = np.mean(_ls['wml']) * 12 * 100\r\n _ls_std = np.std(_ls['wml']) * np.sqrt(12) * 100\r\n\r\n # get output values,\r\n # rows are exrets, standard deviations, Sharpe Ratios, skewnesses, and correlations with ff\r\n # columns are dec1 to dec10, and long-short\r\n _output = pd.DataFrame(index=np.arange(5), columns=np.arange(11))\r\n _output.iloc[[0], :-1] = _crsp_vw.groupby('port')['exret'].mean() * 12 * 100\r\n _output.iloc[[0], [10]] = _ls_mean\r\n _output.iloc[[1], :-1] = _crsp_vw.groupby('port')['exret'].std() * np.sqrt(12) * 100\r\n _output.iloc[[1], [10]] = _ls_std\r\n _output.iloc[[2], :-1] = np.array(_output.iloc[[0], :-1]) / np.array(_output.iloc[[1], :-1])\r\n _output.iloc[[2], [10]] = _ls_mean / _ls_std\r\n _output.iloc[[3], :-1] = _crsp_vw.groupby('port')['exret'].skew()\r\n _output.iloc[[3], [10]] = skew(_ls['wml'])\r\n\r\n # get the correlations for each decile between replication and ff\r\n for i in range(11):\r\n if i <= 8:\r\n _replic = _crsp_vw[_crsp_vw['port'] == (i + 1)]\r\n _replic.reset_index(drop=True, inplace=True)\r\n _ff_group = _ff[['year', 'month', ('ME0' + str(i + 1)), 'RF']]\r\n _ff_group['exret_ff'] = _ff_group['ME0' + str(i + 1)] - _ff_group['RF']\r\n elif i == 9:\r\n _replic = _crsp_vw[_crsp_vw['port'] == (i + 1)]\r\n _replic.reset_index(drop=True, inplace=True)\r\n _ff_group 
= _ff[['year', 'month', 'ME10', 'RF']]\r\n _ff_group['exret_ff'] = _ff_group['ME10'] - _ff_group['RF']\r\n else:\r\n _replic = _ls\r\n _replic['exret'] = _replic['wml']\r\n _ff_group = _ff[['year', 'month', 'wml_size', 'RF']]\r\n _ff_group['exret_ff'] = _ff_group['wml_size']\r\n _compare = pd.merge(_replic, _ff_group, on=['year', 'month'], how='left')\r\n _output.iloc[[4], [i]] = _compare.corr().loc['exret', 'exret_ff']\r\n\r\n # rename the output stats\r\n _output.rename(\r\n columns={0: 'D1', 1: 'D2', 2: 'D3', 3: 'D4', 4: 'D5',\r\n 5: 'D6', 6: 'D7', 7: 'D8', 8: 'D9', 9: 'D10', 10: 'LS'},\r\n index={0: 'exret', 1: 'sd', 2: 'SR', 3: 'skew', 4: 'corr'},\r\n inplace=True)\r\n return _output" ]
[ "0.7288363", "0.6751513", "0.649566", "0.639736", "0.6342396", "0.58377224", "0.5771629", "0.57363266", "0.5704653", "0.5654795", "0.56540567", "0.564203", "0.5640176", "0.5620506", "0.55888516", "0.55628633", "0.5536075", "0.553035", "0.55150396", "0.55067617", "0.5476467", "0.54720634", "0.5450717", "0.5437177", "0.5428002", "0.5424734", "0.5400011", "0.53931135", "0.5391972", "0.5371628" ]
0.7756678
0
Calculate Sharpe ratio for minimizer.
def get_sharpe_ratio(allocs, prices): port_val = get_portfolio_value(prices, allocs, start_val=1.0) sharpe_ratio = get_portfolio_stats(port_val, daily_rf=0.0, samples_per_year=252)[3] return -sharpe_ratio
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sharpe_ratio(self, r_f):\n return (\n self.cumulative_returns().last('1D').iat[0] - r_f\n ) / self.cumulative_returns().std()", "def sharpe_ratio(r1, r2, rf, o1, o2, cov):\n def sr(x):\n w1 = x[0]\n w2 = 1 - w1\n\n Rp = w1 * r1 + w2 * r2\n STDEVp = math.sqrt(portfolio_variance(o1, o2, cov)(x))\n R = (Rp - rf) / STDEVp\n return R\n return sr", "def sharpe_ratio(adr,sddr,sf=252,rfr=0.0):\n rfr=((1.0 + rfr) ** (1/sf)) - 1 # Daily risk free return. This is the shortcut to calculate daily (sf=252) risk free return\n return sf**(1.0/2)*(adr-rfr)/sddr", "def calculate_gear_ratio(front_gear, back_gear):\n return front_gear/back_gear", "def sharpe_ratio(port_returns, risk_free_rate, asset_returns, weights):\n\n # calculate the standard deviation of the returns of the portfolio\n portfolio_standard_deviation = np.sqrt(portfolio_volatility(asset_returns, weights))\n\n # calculate the Sharpe ratio of the portfolio\n sr = (np.mean(port_returns) - risk_free_rate)/portfolio_standard_deviation\n\n return sr", "def sharpe_ratio(port_returns, risk_free_rate, asset_returns, weights):\n\n # calculate the standard deviation of the returns of the portfolio\n portfolio_standard_deviation = np.sqrt(portfolio_volatility(asset_returns, weights))\n\n # calculate the Sharpe ratio of the portfolio\n sr = (port_returns[-1] - risk_free_rate)/portfolio_standard_deviation\n\n return sr", "def adv_ratio(self): # XXX\r\n bw = StatsRouter.global_bw_mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/bw", "def sharpe_ratio(factor_returns, annualization_factor):\r\n\r\n return annualization_factor * factor_returns.mean() / factor_returns.std()", "def bw_ratio(self):\r\n bw = self.bwstats.mean\r\n if bw == 0.0: return 0\r\n else: return self.bw/(1024.*bw)", "def smape(self) -> float:\n _temp = np.sum(2 * np.abs(self.predicted - self.true) / (np.abs(self.true) + np.abs(self.predicted)))\n return float(100 / len(self.true) * _temp)", "def starsize(self, hipid):\n #if hipid<0 or len(self.hip_stars)<=hipid: return 0\n s = self.hip_stars[hipid]\n if s==None: return 0\n #return self.zerosize*(.8**(s[1]))\n #return self.zerosize-s[1]-2\n return self.dimmest_mag-s[1]+1", "def rmspe(self) -> float:\n return float(np.sqrt(np.mean(np.square(((self.true - self.predicted) / self.true)), axis=0)))", "def horizontal_ratio(self):\n if self.pupils_located:\n pupil_left = self.eye_left.pupil.x / (self.eye_left.center[0] * 2 - 10)\n pupil_right = self.eye_right.pupil.x / (self.eye_right.center[0] * 2 - 10)\n return (pupil_left + pupil_right) / 2", "def infected_ratio(self):\n if self.max_pop != 0:\n return int(self.infected_pop) / self.max_pop\n else:\n return 1", "def pe_ratio(self):\n if self._pe_ratio == None:\n return float('inf')\n return self._pe_ratio", "def _calculate_snr_spread(self):\n\n dmSpacing, percentage = 100, 0\n while percentage < 0.5: \n x = np.linspace(self.centerDm - dmSpacing, self.centerDm + dmSpacing, 500)\n y = np.array([self.effective_snr(self.effective_width(self.pulseWidth, self.centerDm - dm_val, self.bandwidth, self.freq), self.pulseWidth * 20) for dm_val in x])\n y = (y / (np.max(y) * 1.0)) if np.max(y) > 0 else y\n percentage = np.size(np.where(y > 0)) / 1000.0\n dmSpacing = dmSpacing*0.6\n \n return x, y", "def golden_ratio():\n print((1+math.sqrt(5))/2)", "def getRatio(probe_num, position_vector, shot_range, dir, day ='050119r'):\n ratio_x = 0\n ratio_y = 0\n ratio_z = 0\n # helm_B = [0,0,0]\n divideby = 0\n for shot in range(shot_range[0], shot_range[1]+1):\n print( 'On shot ', day+str(shot), ' for 
probe ',probe_num)\n x,y,z, currmax,helmB_new = probe_calib(day+str(shot), probe_num, position_vector,dir)\n ratio_x = ratio_x + x\n ratio_y = ratio_y + y\n ratio_z = ratio_z + z\n # helm_B = [helm_B[i] + helmB_new[i] for i in len(helmB)]\n divideby = divideby + 1 #averaging over the number of shots\n ratio_Bx = ratio_x/divideby\n ratio_By = ratio_y/divideby\n ratio_Bz = ratio_z/divideby\n # helmB = [helm_B]/divideby\n # print ratio_Bx, ratio_By, ratio_Bz, helmB\n # print(\"ratio_Bx %f, ratio_By %f, ratio_Bz %f, helmB%s\"%(ratio_Bx, ratio_By, ratio_Bz, helmB))\n Bx_sqr =ratio_x**2\n By_sqr =ratio_y**2\n Bz_sqr =ratio_z**2\n B = Bx_sqr + By_sqr+ Bz_sqr\n norm_factor = np.sqrt(B)\n ratio_Bx, ratio_By, ratio_Bz = [ratio_Bx, ratio_By, ratio_Bz]/norm_factor\n\n return (ratio_Bx, ratio_By, ratio_Bz, norm_factor)", "def quick_ratio(self):\n return (\n self.current_assets - self.inventory_net) / self.current_liabilities", "def golden_ratio():\n return 1.61803398875", "def pe_ratio(self):\n try:\n return self.price / self.dividend_yield\n except ZeroDivisionError:\n return 0.0", "def get_scaling_ratio(img):\n\n healthy_img_area = 4872 * 6496\n input_img_area = img.shape[0] * img.shape[1]\n ratio = input_img_area / healthy_img_area\n return ratio", "def get_expected_compression_ratio_pct(self) -> int:\n return 100", "def calc_NPSH(P_suction, P_vapor, rho_liq):\n # Note: NPSH = (P_suction - P_vapor)/(rho_liq*gravity)\n # Taking into account units, NPSH will be equal to return value\n return 0.334438*(P_suction - P_vapor)/rho_liq", "def sharpe_ratio(returns, risk_free=0, period=DAILY):\n\n returns_risk_adj = returns - risk_free\n\n if (len(returns_risk_adj) < 5) or np.all(returns_risk_adj == 0):\n return np.nan\n\n return np.mean(returns_risk_adj) / \\\n np.std(returns_risk_adj) * \\\n np.sqrt(ANNUALIZATION_FACTORS[period])", "def ratio(original, compressed):\n olen = len(original)\n clen = len(compressed)\n return (olen - clen) / olen", "def perfect_ratios(g, goal):\n if goal == 'ORE':\n return 1\n\n return Fraction(sum(perfect_ratios(g, subgoal) * mul for subgoal, mul in\n g[goal][1].items()), g[goal][0])", "def strm_bw_ratio(self):\r\n bw = self.bwstats.mean\r\n if StatsRouter.global_strm_mean == 0.0: return 0\r\n else: return (1.0*bw)/StatsRouter.global_strm_mean", "def sharpe(returns):\n return returns.mean() / returns.std()", "def sharpness_penalty(self):\n # This polynomial function gives the gain for peaking filter which achieves 18 dB / octave max derivative\n # The polynomial estimate is accurate in the vicinity of 18 dB / octave\n gain_limit = -0.09503189270199464 + 20.575128011847003 * (1 / self.q)\n # Scaled sigmoid function as penalty coefficient\n x = self.gain / gain_limit - 1\n sharpness_penalty_coefficient = 1 / (1 + np.e ** (-x * 100))\n return np.mean(np.square(self.fr * sharpness_penalty_coefficient))" ]
[ "0.6567362", "0.6437101", "0.63409936", "0.6130169", "0.6042319", "0.60273135", "0.5931197", "0.59244883", "0.5785139", "0.5736135", "0.57228225", "0.5692179", "0.5680298", "0.5669893", "0.5615704", "0.5592618", "0.55892605", "0.55826575", "0.55295515", "0.5504721", "0.54996413", "0.5473298", "0.54647964", "0.54578286", "0.54437244", "0.543583", "0.5428085", "0.54129666", "0.5402032", "0.53897303" ]
0.67490816
0
Creates a SnowflakeSource from a protobuf representation of a SnowflakeSource.
def from_proto(data_source: DataSourceProto): return SnowflakeSource( field_mapping=dict(data_source.field_mapping), database=data_source.snowflake_options.database, schema=data_source.snowflake_options.schema, table=data_source.snowflake_options.table, event_timestamp_column=data_source.event_timestamp_column, created_timestamp_column=data_source.created_timestamp_column, date_partition_column=data_source.date_partition_column, query=data_source.snowflake_options.query, )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def FromProto(cls, proto_obj):\n source = GameSource()\n source.type = proto_obj.type\n if proto_obj.update_time_utc_str:\n source.update_date_time = datetime.strptime(\n proto_obj.update_time_utc_str, tweets.DATE_PARSE_FMT_STR)\n else:\n source.update_date_time = datetime.now()\n if proto_obj.twitter_account:\n source.account_id = long(proto_obj.twitter_account.id_str)\n source.tweet_text = proto_obj.tweet_text\n if proto_obj.score_reporter_url:\n source.score_reporter_url = proto_obj.score_reporter_url\n if not (source.account_id or source.score_reporter_url):\n raise GameModelError('Converting GameSource from malformed proto')\n return source", "def to_proto(self) -> DataSourceProto:\n data_source_proto = DataSourceProto(\n type=DataSourceProto.BATCH_SNOWFLAKE,\n field_mapping=self.field_mapping,\n snowflake_options=self.snowflake_options.to_proto(),\n )\n\n data_source_proto.event_timestamp_column = self.event_timestamp_column\n data_source_proto.created_timestamp_column = self.created_timestamp_column\n data_source_proto.date_partition_column = self.date_partition_column\n\n return data_source_proto", "def from_proto(cls, snowflake_options_proto: DataSourceProto.SnowflakeOptions):\n snowflake_options = cls(\n database=snowflake_options_proto.database,\n schema=snowflake_options_proto.schema,\n table=snowflake_options_proto.table,\n query=snowflake_options_proto.query,\n )\n\n return snowflake_options", "def from_dict(cls, dikt) -> 'SourceSchema':\n return util.deserialize_model(dikt, cls)", "def make(self, source):\n if isinstance(source, str):\n return copy(self.get(source))\n elif self.PB_CLASS and isinstance(source, self.PB_CLASS):\n item = copy(self.get(source.name))\n item._pb = source\n return item\n else:\n return copy(source)", "def to_proto(self) -> DataSourceProto.SnowflakeOptions:\n snowflake_options_proto = DataSourceProto.SnowflakeOptions(\n database=self.database,\n schema=self.schema,\n table=self.table,\n query=self.query,\n )\n\n return snowflake_options_proto", "def FromWireFormat(cls, value):\n return _GetFactory(cls).FromWireFormat(value)", "def FromProto(cls, proto_obj):\n if not proto_obj.last_update_source:\n raise GameModelError('No update source specified in Game creation.')\n # TODO(P2): refactor all constructors into one base function like in tweets.\n return Game(id_str=proto_obj.id_str,\n teams=[Team.FromProto(tm) for tm in proto_obj.teams],\n scores=proto_obj.scores,\n name=proto_obj.name,\n tournament_id=proto_obj.tournament_id_str,\n tournament_name=proto_obj.tournament_name,\n game_status=proto_obj.game_status,\n division=proto_obj.division,\n league=proto_obj.league,\n age_bracket=proto_obj.age_bracket,\n sources=[GameSource.FromProto(proto_obj.last_update_source)],\n key=game_key(proto_obj))", "def from_dict(cls, dikt) -> 'SourceAudit':\n return util.deserialize_model(dikt, cls)", "def create_from_source(self):\n create_statement = self.source.create_statement\n self.create_from_statement(create_statement)\n # Add constraints\n constraints = self.source.constraints\n self.add_constraints(constraints)\n\n # Add indexes\n indexes = self.source.indexes\n self.add_indexes(indexes)\n\n # Add the non-referenced foreign keys\n non_referenced_fks = [x for x in self.source.foreign_keys if not x.referenced]\n self.add_foreign_keys(non_referenced_fks, override_table=self.name)", "def FromBytes(cls, value: bytes):\n precondition.AssertType(value, bytes)\n return _GetFactory(cls).FromBytes(value)", "def fromSerpent(\n cls, source, sourcename=None, 
postcheck=True, strict=True, names=None,\n ):\n return super().fromSerpent(\n source,\n sourcename=sourcename,\n postcheck=postcheck,\n strict=strict,\n names=names,\n )", "def from_proto(cls, hive_options_proto: Any):\n\n pass", "def from_serialized_proto(cls, proto_string: bytes) -> 'TableInfo':\n proto = schema_pb2.TableInfo.FromString(proto_string)\n if proto.HasField('signature'):\n signature = nested_structure_coder.decode_proto(proto.signature)\n else:\n signature = None\n return cls(\n name=proto.name,\n sampler_options=proto.sampler_options,\n remover_options=proto.remover_options,\n max_size=proto.max_size,\n max_times_sampled=proto.max_times_sampled,\n rate_limiter_info=proto.rate_limiter_info,\n signature=signature,\n current_size=proto.current_size,\n num_episodes=proto.num_episodes,\n num_deleted_episodes=proto.num_deleted_episodes,\n num_unique_samples=proto.num_unique_samples,\n table_worker_time=proto.table_worker_time,\n )", "def from_struct(cls, struct, source):\n try:\n if struct['pubDate'] != 'None':\n date = datetime.datetime.strptime(struct['pubDate'], \"%a, %d %b %Y %H:%M\")\n else:\n date = datetime.datetime.now()\n\n return cls.create(\n title=struct['title'],\n description=struct['description'],\n dec_description=struct['dec_description'],\n link=struct['link'],\n pubDate=date,\n media=json.dumps(struct['media']),\n source=source,\n links=json.dumps(struct['links']),\n dec_links=json.dumps(struct['dec_links'])\n )\n except peewee.IntegrityError:\n return None", "def from_proto(cls, feature_set_proto: FeatureSetProto):\n\n feature_set = cls(\n name=feature_set_proto.spec.name,\n features=[\n Feature.from_proto(feature)\n for feature in feature_set_proto.spec.features\n ],\n entities=[\n Entity.from_proto(entity) for entity in feature_set_proto.spec.entities\n ],\n max_age=(\n None\n if feature_set_proto.spec.max_age.seconds == 0\n and feature_set_proto.spec.max_age.nanos == 0\n else feature_set_proto.spec.max_age\n ),\n labels=feature_set_proto.spec.labels,\n source=(\n None\n if feature_set_proto.spec.source.type == 0\n else Source.from_proto(feature_set_proto.spec.source)\n ),\n project=None\n if len(feature_set_proto.spec.project) == 0\n else feature_set_proto.spec.project,\n )\n feature_set._status = feature_set_proto.meta.status # type: ignore\n feature_set._created_timestamp = feature_set_proto.meta.created_timestamp\n return feature_set", "def from_bytes(buf: bytes) -> 'ProposalInfo':\n proposal_info_in_dict: dict = json_loads(buf.decode())\n proposal_info_in_dict[\"id\"] = bytes.fromhex(proposal_info_in_dict[\"id\"])\n proposal_info_in_dict[\"proposer\"] = Address.from_string(proposal_info_in_dict[\"proposer\"])\n return ProposalInfo(**proposal_info_in_dict)", "def parse_pbobject(source, pb_class):\n if isinstance(source, str):\n return open_pbobject(source, pb_class)\n elif isinstance(source, bytes):\n pb_object = pb_class()\n pb_object.ParseFromString(source)\n return pb_object\n else:\n logging.error(f'cannot parse type {type(source)}')", "def create(self, saved_source_id):\n raw_saved_source_data = self.es.get(index='.kibana', doc_type='doc', id=saved_source_id)\n\n saved_source_type = raw_saved_source_data['_source']['type']\n\n if saved_source_type == 'search':\n return SavedSearch(saved_source_id, self.conf)\n elif saved_source_type == 'visualization':\n return SavedVisualization(saved_source_id, self.conf)\n else:\n # TODO: Raise some exception.\n pass", "def from_dict(cls: T, source: dict[str, Any], connection: Connection) -> T:\n return 
super(Entity, cls).from_dict(source=source, connection=connection)", "def from_caffe_solver_protoxt(cls, caffe_solver_prototxt_file: Path):\n solver_param = caffe_pb2.SolverParameter()\n with open(caffe_solver_prototxt_file, 'rt') as f:\n pb2.text_format.Merge(f.read(), solver_param)\n dictionary = {'lr_policy': solver_param.lr_policy,\n 'base_lr': solver_param.base_lr,\n 'gamma': solver_param.gamma,\n 'momentum': solver_param.momentum,\n 'max_iter': solver_param.max_iter,\n 'stepsize': solver_param.stepsize,\n 'stepvalues': solver_param.stepvalue,\n 'weight_decay': solver_param.weight_decay,\n 'iter_size': solver_param.iter_size,\n 'from_prototxt': caffe_solver_prototxt_file}\n return cls(**dictionary)", "def FromProto(cls, proto_obj):\n key=None\n if proto_obj.twitter_account:\n twitter_id = long(proto_obj.twitter_account.id_str)\n key = team_twitter_key(twitter_id)\n else:\n twitter_id = 0\n if proto_obj.score_reporter_account:\n score_reporter_id = proto_obj.score_reporter_account.id\n key = team_score_reporter_key(score_reporter_id)\n else:\n score_reporter_id = ''\n return Team(twitter_id=twitter_id, score_reporter_id=score_reporter_id,\n parent=key)", "def from_config(config: Dict[str, Any]):\n source_name = config[\"source\"]\n host = config.get(\"host\", \"localhost\")\n port = config.get(\"port\", 8081)\n api_key = (config.get(\"api_key_name\", \"\"), config.get(\"api_key\", \"\"))\n return KukurSource(source_name, host, port, api_key)", "def from_json(cls, point_source, json_text):\n data = json.loads(json_text)\n point = point_source.getPoint(data['index'])\n # Fairly limited amount of verification we can do\n assert point.label == data['label']\n return point", "def _set_source(source, context):\n if isinstance(source, (str, list, dict, Dataset)):\n return Source(source, context)\n elif isinstance(source, Source):\n return source\n else:\n raise ValueError('Wrong source')", "def fromBytes(cls, inBytes):\n return cls.fromJson(inBytes.decode())", "def fromBytes(cls, inBytes):\n return cls.fromJson(inBytes.decode())", "def fromBytes(cls, inBytes):\n return cls.fromJson(inBytes.decode())", "def parse(filename_or_obj):\n if isinstance(filename_or_obj, basestring):\n # Anything ObsPy can read.\n try:\n src = obspy.readEvents(filename_or_obj)\n except:\n pass\n else:\n return Source.parse(src)\n # CMT solution file.\n try:\n return Source.from_CMTSOLUTION_file(filename_or_obj)\n except:\n pass\n raise SourceParseError(\"Could not parse the given source.\")\n elif isinstance(filename_or_obj, obspy.Catalog):\n if len(filename_or_obj) == 0:\n raise SourceParseError(\"Event catalog contains zero events.\")\n elif len(filename_or_obj) > 1:\n raise SourceParseError(\n \"Event catalog contains %i events. Only one is allowed. 
\"\n \"Please parse seperately.\" % len(filename_or_obj))\n return Source.parse(filename_or_obj[0])\n elif isinstance(filename_or_obj, obspy.core.event.Event):\n ev = filename_or_obj\n if not ev.origins:\n raise SourceParseError(\"Event must contain an origin.\")\n if not ev.focal_mechanisms:\n raise SourceParseError(\"Event must contain a focal mechanism.\")\n org = ev.preferred_origin() or ev.origins[0]\n fm = ev.preferred_focal_mechanism() or ev.focal_mechansisms[0]\n if not fm.moment_tensor:\n raise SourceParseError(\"Event must contain a moment tensor.\")\n t = fm.moment_tensor.tensor\n return Source(\n latitude=org.latitude,\n longitude=org.longitude,\n depth_in_m=org.depth,\n m_rr=t.m_rr,\n m_tt=t.m_tt,\n m_pp=t.m_pp,\n m_rt=t.m_rt,\n m_rp=t.m_rp,\n m_tp=t.m_tp)\n else:\n raise NotImplementedError", "def _from_cpp(self, str_msg, cls):\n msg = cls()\n result = msg.deserialize(str_msg)\n return result" ]
[ "0.69648314", "0.6726757", "0.62188435", "0.6085031", "0.54869676", "0.53810257", "0.5301759", "0.52652085", "0.5256398", "0.52558035", "0.52312374", "0.5175181", "0.51523924", "0.51177007", "0.5046059", "0.5044137", "0.50330454", "0.50251067", "0.5004873", "0.49959263", "0.49941787", "0.49703214", "0.49626857", "0.49538267", "0.49383047", "0.49380133", "0.49380133", "0.49380133", "0.49323687", "0.49278316" ]
0.8149464
0
Returns the database of this snowflake source.
def database(self): return self.snowflake_options.database
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_database(self):\n return self.database", "def database(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database\")", "def database(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database\")", "def database(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"database\")", "def database(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database\")", "def database(self):\n return self._database", "def database(self):\n return self._database", "def database(self):\n return self._database", "def database(self):\n return self._database", "def get_database(self):\n if self._database is None:\n conn = self.get_connection()\n db = conn[self.database]\n self._database = db\n\n return self._database", "def database(self):\n\n return self._database", "def database(self):\n\n return self._database", "def dbname(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"dbname\")", "def database_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"database_name\")", "def db(self) -> str:\n return self._db", "def getDatabaseName(self):\n raise NotImplementedError", "def getDatabaseName(self):\n return self._base.getDatabaseName()", "def db(self):\n return self._project.db", "def database_name(self) -> str:\n return pulumi.get(self, \"database_name\")", "def getDb(self):\n return self.db", "def database_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"database_name\")", "def database():\n return conf().database", "def database_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"database_name\")", "def get_database(self, instance, name):\n return instance.get_database(name)", "def get_db_name(self):\n\t\treturn conf.db_name", "def schema(self):\n return self.snowflake_options.schema", "def database(self):\n try:\n return self._database\n except:\n database = self.application.connection[self.database_name]\n self._database = database\n return database", "def current_db(self):\n return self._current_db", "def get_db(self):\n return self._db", "def get_db(self):\n db = getattr(g, '_database', None)\n if db is None:\n db = g._database = connect(DATABASE)\n return db" ]
[ "0.7446689", "0.7409722", "0.7409722", "0.7361835", "0.73268086", "0.70755297", "0.70755297", "0.70755297", "0.70755297", "0.7035698", "0.70008326", "0.70008326", "0.6984017", "0.69791114", "0.6932914", "0.69217163", "0.6915366", "0.6904952", "0.6886391", "0.68326914", "0.6831212", "0.68180186", "0.68058586", "0.67343223", "0.6707062", "0.6660032", "0.66552055", "0.6643545", "0.66404915", "0.66191036" ]
0.85056674
0
Returns the schema of this snowflake source.
def schema(self): return self.snowflake_options.schema
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def schema(self):\n return self.table_info.schema", "def get_source_schema(cls) -> dict:\n source_schema = get_base_schema(\n root=True,\n id_=\"source.schema.json\",\n title=\"Source data schema\",\n description=\"Schema for the source data, files and directories\",\n version=\"0.1.0\",\n )\n for interface_name, data_interface in cls.data_interface_classes.items():\n source_schema[\"properties\"].update({interface_name: unroot_schema(data_interface.get_source_schema())})\n return source_schema", "def get_schema(self):\r\n return self.__schema", "def schema(self):\n return self._schema", "def get_schema(self):\n response = self.client.get(self._get_collection_url('schema'))\n\n return response.get('schema', {})", "def _schema(self):\n\n self._check_compiled()\n return self._compiled._schema", "def schema(self):\n return _parse_schema_resource(self._properties.get(\"schema\", {}))", "def get_schema(cls):\n return cls.schema()", "def get_schema(): # noqa: WPS440\n return config.DEFAULT_SCHEMA", "def schema(self) -> str:\n return parse_schema(self._spec[\"schema\"])", "def schema(self) -> 'outputs.TableSchemaResponse':\n return pulumi.get(self, \"schema\")", "def schema(self):\n # type: () -> object\n return self._schema", "def get_schema(self) -> dict:\n return schemas.get_object_schema(self.schema)", "def get_schema(self) -> ArchiveSchema:\n return self.schema", "def reference_schema(self) -> pulumi.Input['ApplicationApplicationConfigurationSqlApplicationConfigurationReferenceDataSourceReferenceSchemaArgs']:\n return pulumi.get(self, \"reference_schema\")", "def _get_schema(self):\n self._pick()\n return Schema()", "def schema(self) -> graphql.GraphQLSchema:\n return self._schema", "def schema(self):\n return self.prov[PROV_SCHEMA]", "def get_schema(self, engine_name):\n endpoint = \"engines/{}/schema\".format(engine_name)\n return self.swiftype_session.request('get', endpoint)", "def schema(self):\n pass", "def destination_schema(self) -> pulumi.Input['ApplicationApplicationConfigurationSqlApplicationConfigurationOutputDestinationSchemaArgs']:\n return pulumi.get(self, \"destination_schema\")", "def get_schema(self) -> dict:", "def schema(cls):\n return Schema.get_instance(cls)", "def schema(self) -> Schema:\n return next(schema for schema in self.metadata.schemas if schema.schema_id == self.metadata.current_schema_id)", "def schema(self):\n raise NotImplementedError", "def schema_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"schema_name\")", "def schema(self) -> Dict[str, Dict]:\n return self._schema", "def output_schema(self) -> Optional[str]:\n return pulumi.get(self, \"output_schema\")", "def get_meta_schema(self):\n return self._tc_meta_schema", "def getSchema( sourceDirectory ):\r\n if( sourceDirectory == settings.LEXISNEXIS_FILETAG ): return LexisNexisSchema()\r\n raise Exception( \"Filer for source <%s> is not registered in getSchema( source ).\" % ( sourceDirectory ) )" ]
[ "0.7523006", "0.7474272", "0.731446", "0.72799426", "0.7248296", "0.72096664", "0.72077894", "0.7188622", "0.71739715", "0.71583384", "0.7152522", "0.71101445", "0.6935228", "0.68277", "0.6764784", "0.67362326", "0.67253757", "0.6720299", "0.67055655", "0.6627854", "0.66070616", "0.6565936", "0.6549127", "0.65442", "0.65237534", "0.65216213", "0.65009224", "0.6483633", "0.64834803", "0.6470147" ]
0.8667721
0
Returns the table of this snowflake source.
def table(self): return self.snowflake_options.table
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getTable(self):\n return self.table", "def _get_table(self):\n\t\treturn self._table", "def get_tablename(self):\n return self.ds_table", "def getTable(self):\n\n raise NotImplementedError", "def table(self):\n if not self.exists:\n return None\n return self._get_table()", "def table(self):\n return self.reference.table", "def getTable(self):\n return self.db.table(self.entity)", "def table(self):\n return self.generator.table", "def table(self):\n return self._table", "def table(self):\n return self._table", "def table(self):\r\n return self._table", "def table(self):\n return self._table_name", "def table(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"table\")", "def getTable(self, name: str):\n query = f\"SELECT * FROM '{name}';\"\n result = sql.executeAndReadQuery(self.connection, query)\n return result", "def get_table_name(self):\n return self._table", "def table_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"table_name\")", "def __getTable(self):\n\n if not self.__table:\n tableConnectionParams = parseConnectionString(\n self.tableConnString);\n\n self.__table = Table(\n tableConnectionParams['name'],\n connection = getDbConnection(tableConnectionParams));\n\n return self.__table;", "def table(self, table_name):\n return self._get_storage().table(table_name)", "def destination_table(self) -> str:\n return pulumi.get(self, \"destination_table\")", "def table(self) -> 'outputs.PreventionJobTriggerInspectJobActionSaveFindingsOutputConfigTable':\n return pulumi.get(self, \"table\")", "def table(cls):\n return cls.__name__", "def get_target_table(self, source):\n target_tables = set()\n target_fields = [t[1] for t in self.mapping.items() if t[0].split('.')[0] == source]\n for f in target_fields:\n target_tables.update([c.split('.')[0] for c in f.keys()])\n self.target_tables = list(target_tables)\n return self.target_tables", "def getTableDefForTable(self, tableName):\n\t\tif not \".\" in tableName:\n\t\t\ttableName = \"public.\"+tableName\n\t\t\n\t\tfor row in self.readerConnection.queryToDicts(\n\t\t\t\t\"select sourcerd, tablename from dc.tablemeta where\"\n\t\t\t\t\" lower(tableName)=%(tableName)s\",\n\t\t\t\t{\"tableName\": tableName.lower()}):\n\t\t\tbreak\n\t\telse:\n\t\t\traise base.ui.logOldExc(\n\t\t\t\tbase.NotFoundError(tableName, \"table\", \"dc_tables\"))\n\n\t\treturn base.caches.getRD(row[\"sourcerd\"]\n\t\t\t).getById(row[\"tablename\"].split(\".\")[-1])", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"table_name\")", "def table(self):\n if self._table is None:\n self._table = list(self._iter_rows())\n\n return self._table", "def table(self) -> 'outputs.PreventionStoredInfoTypeLargeCustomDictionaryBigQueryFieldTable':\n return pulumi.get(self, \"table\")", "def table(self):\n return self.t", "def getTable(self, tablename):\n tablename = self.prefix + tablename\n if not tablename in self.tables:\n self.tables[tablename] = Table( tablename, self.metadata, \\\n autoload=True, autoload_with=self.conn )\n\n return self.tables[tablename]" ]
[ "0.7333813", "0.72519577", "0.7144599", "0.7141145", "0.6996592", "0.6968488", "0.6951184", "0.6948223", "0.69295055", "0.69295055", "0.68013984", "0.6795885", "0.674512", "0.66221476", "0.65775824", "0.6573351", "0.6545", "0.65317136", "0.6509364", "0.6500206", "0.6481945", "0.6466261", "0.64085484", "0.640222", "0.640222", "0.640222", "0.6378164", "0.636884", "0.634804", "0.63110393" ]
0.8042992
0
Converts a SnowflakeSource object to its protobuf representation.
def to_proto(self) -> DataSourceProto: data_source_proto = DataSourceProto( type=DataSourceProto.BATCH_SNOWFLAKE, field_mapping=self.field_mapping, snowflake_options=self.snowflake_options.to_proto(), ) data_source_proto.event_timestamp_column = self.event_timestamp_column data_source_proto.created_timestamp_column = self.created_timestamp_column data_source_proto.date_partition_column = self.date_partition_column return data_source_proto
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_proto(data_source: DataSourceProto):\n return SnowflakeSource(\n field_mapping=dict(data_source.field_mapping),\n database=data_source.snowflake_options.database,\n schema=data_source.snowflake_options.schema,\n table=data_source.snowflake_options.table,\n event_timestamp_column=data_source.event_timestamp_column,\n created_timestamp_column=data_source.created_timestamp_column,\n date_partition_column=data_source.date_partition_column,\n query=data_source.snowflake_options.query,\n )", "def to_proto(self) -> DataSourceProto.SnowflakeOptions:\n snowflake_options_proto = DataSourceProto.SnowflakeOptions(\n database=self.database,\n schema=self.schema,\n table=self.table,\n query=self.query,\n )\n\n return snowflake_options_proto", "def to_proto(self) -> None:\n\n pass", "def FromProto(cls, proto_obj):\n source = GameSource()\n source.type = proto_obj.type\n if proto_obj.update_time_utc_str:\n source.update_date_time = datetime.strptime(\n proto_obj.update_time_utc_str, tweets.DATE_PARSE_FMT_STR)\n else:\n source.update_date_time = datetime.now()\n if proto_obj.twitter_account:\n source.account_id = long(proto_obj.twitter_account.id_str)\n source.tweet_text = proto_obj.tweet_text\n if proto_obj.score_reporter_url:\n source.score_reporter_url = proto_obj.score_reporter_url\n if not (source.account_id or source.score_reporter_url):\n raise GameModelError('Converting GameSource from malformed proto')\n return source", "def to_protobuf(self):\n self._validate()\n kwargs = {k: _convert(getattr(self, k), 'to_protobuf')\n for k in self._get_params()}\n return self._protobuf_cls(**kwargs)", "def get_source_unicode(obj):\n return inspect.getsource(obj).decode(get_encoding(obj))", "def to_proto(self) -> FeatureSetReferenceProto:\n return self.proto", "def _object2proto(self) -> Metadata_PB:\n return Metadata_PB(\n name=self.name, id=serialize(self.id), node=serialize(self.node)\n )", "def from_proto(cls, snowflake_options_proto: DataSourceProto.SnowflakeOptions):\n snowflake_options = cls(\n database=snowflake_options_proto.database,\n schema=snowflake_options_proto.schema,\n table=snowflake_options_proto.table,\n query=snowflake_options_proto.query,\n )\n\n return snowflake_options", "def get_protobuf_schema() -> GeneratedProtocolMessageType:\n return StorableObject_PB", "def to_proto(self):\n filename_tensor = array_ops.placeholder(\n shape=[], dtype=dtypes.string, name=\"saver_filename\")\n save_tensor = self._traced_save(filename_tensor)\n restore_op = self._traced_restore(filename_tensor).op\n return saver_pb2.SaverDef(\n filename_tensor_name=filename_tensor.name,\n save_tensor_name=save_tensor.name,\n restore_op_name=restore_op.name,\n version=saver_pb2.SaverDef.V2)", "def to_proto(self) -> FeatureSetProto:\n\n meta = FeatureSetMetaProto(\n created_timestamp=self.created_timestamp, status=self.status\n )\n\n spec = FeatureSetSpecProto(\n name=self.name,\n project=self.project,\n max_age=self.max_age,\n labels=self.labels,\n source=self.source.to_proto() if self.source is not None else None,\n features=[\n field.to_proto()\n for field in self._fields.values()\n if type(field) == Feature\n ],\n entities=[\n field.to_proto()\n for field in self._fields.values()\n if type(field) == Entity\n ],\n )\n\n return FeatureSetProto(spec=spec, meta=meta)", "def toStr(self, protoObj):\n return text_format.MessageToString(protoObj)", "def toStr(self, protoObj):\n return text_format.MessageToString(protoObj)", "def _stringify_proto(obj):\n return obj.SerializeToString()", "def make(self, source):\n 
if isinstance(source, str):\n return copy(self.get(source))\n elif self.PB_CLASS and isinstance(source, self.PB_CLASS):\n item = copy(self.get(source.name))\n item._pb = source\n return item\n else:\n return copy(source)", "def get_source_unicode(obj):\n return inspect.getsource(obj)", "def _object2proto(self) -> GetGroupsMessage_PB:\n return GetGroupsMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def ToProto(self):\n game = scores_messages.Game()\n game.id_str = self.id_str\n game.teams = [team.ToProto() for team in self.teams]\n game.scores = self.scores\n game.name = self.name\n game.tournament_id_str = self.tournament_id\n game.tournament_name = self.tournament_name\n game.game_status = self.game_status\n game.division = self.division\n game.league = self.league\n game.age_bracket = self.age_bracket\n if self.sources:\n game.last_update_source = self.sources[0].ToProto()\n return game", "def _proto_to_string(self, p: google.protobuf.message.Message) -> str:\n return text_format.MessageToString(p, as_one_line=True)", "def _object2proto(self) -> UpdateGroupMessage_PB:\n return UpdateGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _object2proto(self) -> GetGroupMessage_PB:\n return GetGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def _get_source_str(obj):\n # type: (Union[str, Source, Document]) -> str\n if isinstance(obj, str):\n source_str = obj\n elif isinstance(obj, Source):\n source_str = obj.body\n elif isinstance(obj, Document):\n source_str = obj.loc.source.body\n else:\n source_str = \"\"\n # remove new lines, tabs and extra whitespace from source_str\n return re.sub(r\"\\s+\", \" \", source_str).strip()", "def get_source_schema(cls) -> dict:\n source_schema = get_base_schema(\n root=True,\n id_=\"source.schema.json\",\n title=\"Source data schema\",\n description=\"Schema for the source data, files and directories\",\n version=\"0.1.0\",\n )\n for interface_name, data_interface in cls.data_interface_classes.items():\n source_schema[\"properties\"].update({interface_name: unroot_schema(data_interface.get_source_schema())})\n return source_schema", "def SphinxDummySourceClass(source: Any, *args: Any, **kwargs: Any) -> Any:\n return source", "def _object2proto(self) -> CreateGroupMessage_PB:\n return CreateGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def render_source(output_dir, package_spec):\n path, name = package_spec.filepath\n destination_filename = '%s/%s.proto' % (output_dir, name)\n pb_template = JENV.get_template(MESSAGES_TEMPLATE_NAME)\n includes = [include[:-5] if include.endswith('.yaml') else include for include in package_spec.includes]\n if 'types' in includes:\n includes.remove('types')\n with open(destination_filename, 'w') as f:\n f.write(pb_template.render(\n name=name,\n package=package_spec.identifier,\n messages=package_spec.definitions,\n includes=includes,\n description=package_spec.description,\n ))", "def parse_pbobject(source, pb_class):\n if isinstance(source, str):\n return open_pbobject(source, pb_class)\n elif isinstance(source, bytes):\n pb_object = pb_class()\n pb_object.ParseFromString(source)\n 
return pb_object\n else:\n logging.error(f'cannot parse type {type(source)}')", "def _stringify_proto(obj):\n if isinstance(obj, str): return obj\n elif isinstance(obj, Message): return obj.SerializeToString()\n else: raise TypeError('Object can not be serialized as a string.')", "def source():\n\n source = models.Source(name=u\"Joe's Funerals.com\", url=u\"http://www.joesfunerals.com\")\n return source" ]
[ "0.71726424", "0.6312265", "0.57572246", "0.56970835", "0.5353327", "0.5324823", "0.52810025", "0.5244373", "0.51959056", "0.51375407", "0.51266086", "0.51017046", "0.50727355", "0.50727355", "0.5006445", "0.5004341", "0.49543244", "0.48841015", "0.48770934", "0.48703986", "0.48410082", "0.47356263", "0.4732318", "0.47321752", "0.470947", "0.4648941", "0.464857", "0.4639523", "0.46279892", "0.45872542" ]
0.7205786
0
Returns a string that can directly be used to reference this table in SQL.
def get_table_query_string(self) -> str: if self.database and self.table: return f'"{self.database}"."{self.schema}"."{self.table}"' elif self.table: return f'"{self.table}"' else: return f"({self.query})"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def table_name() -> str:\n pass", "def __repr__(self):\n cls_name = self.__class__.__name__\n conn_name = str(self._connection)\n tbl_name = self._table\n return '{0}({1}, table={2!r})'.format(cls_name, conn_name, tbl_name)", "def __repr__(self):\n cls_name = self.__class__.__name__\n conn_name = str(self._connection)\n tbl_name = self._table\n return '{0}({1}, table={2!r})'.format(cls_name, conn_name, tbl_name)", "def __str__(self):\n return (\n f'{self.__class__.__name__}'\n f'\\n> defined by: {self._str_meta_()}'\n f'\\n> with columns: {self._str_colnames()}'\n f'\\n> {len(self)} objects'\n f'\\n{APtable.__str__(self)}'\n )", "def name(self) -> str:\n return f\"lookup_table_{self.table_number}\"", "def table(self):\n return self._table_name", "def name(self):\n if self.table:\n return \"{}.{}\".format(self.table, self.field_name)\n return self.field_name", "def autoname(self):\n ret = \"%(table)s_%(reftable)s_fkey\"%dict(\n table=self.table.name,\n reftable=self.reftable.name,\n )\n return ret", "def to_sql(self) -> str:\n sql = self.name + ' ' + self.value_type + ' '\n\n if self.primary:\n sql += 'PRIMARY KEY' + ' '\n\n if self.unique:\n sql += 'UNIQUE' + ' '\n\n if self.autoincrement:\n sql += 'AUTOINCREMENT' + ' '\n\n if self.not_null:\n sql += 'NOT NULL' + ' '\n\n return sql", "def table(self):\n return self.reference.table", "def __str__(self):\n tablename = self.tablename()\n attrs = {}\n if Registry.SCHEMAS.has_key(tablename):\n for key in Registry.SCHEMAS[tablename]:\n attrs[key] = getattr(self, key, None)\n return \"<%s object: %s>\" % (self.__class__.__name__, str(attrs))", "def __tablename__(cls):\n return get_table_name(cls.__name__)", "def table_name(self) -> str:\n return self.model._meta.db_table", "def table_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"table_name\")", "def __repr__(self):\n description = [self._str_meta_(), 'columns: '+self._str_colnames()]\n return f'{self.__class__.__name__}({\", \".join(description)})'", "def sql_for_tablespace(self, tablespace, inline=False):\n return \"ON %s\" % self.quote_name(tablespace)", "def __tablename__(cls) -> str:\n return inflection.underscore(cls.__name__)", "def get_table_name(self):\n return self._table", "def to_sql(self) -> str:\n sql = 'FOREIGN KEY(' + self.column\n sql += ') REFERENCES ' + self.target_table\n sql += '(' + self.target_column + ')'\n if self.delete == 'CASCADE':\n sql += ' ON DELETE CASCADE DEFERRABLE INITIALLY DEFERRED'\n return sql", "def __str__(self):\r\n tmp = \"\"\r\n for (name, value) in self.__table__.items():\r\n tmp += str(name) + \"\\n\" + str(value) + \"\\n\"\r\n return(tmp)", "def name(self) -> str:\n return self.fqtable.replace(\".\", \"_\")", "def __str__(self):\n\n table_list = [self.headers]\n\n for row in self.data:\n table_list.append([row[col] or \"\" for col in self.headers])\n\n return create_table_string(table_list)", "def encodeTableName(self, schema, table):\r\n return '\"{}\".\"{}\"'.format(schema, table)", "def table_name(self):\n return self._new_table.name", "def table_name(self) -> str:\n return \"OLTP\"", "def tablename(entity) -> str:\n return entity.__tablename__", "def __str__(self):\n return self.sql()", "def schema_ref(schema, table):\n return schema + '.' + table", "def _table_id(project: str, table: FeatureView) -> str:\n return f\"{project}_{table.name}\"", "def __tablename__(self):\n return sub(r\"(?<!^)(?=[A-Z])\", \"_\", self.__name__).lower()" ]
[ "0.72242546", "0.7145293", "0.7145293", "0.71200347", "0.70594376", "0.70300525", "0.6961341", "0.6907269", "0.6881874", "0.6875941", "0.68361664", "0.68174165", "0.68159", "0.6803258", "0.67897487", "0.67895746", "0.67832047", "0.6771481", "0.6731299", "0.67075336", "0.6672556", "0.6660589", "0.66221374", "0.6607308", "0.6571065", "0.65245473", "0.647171", "0.64346856", "0.64038736", "0.63869065" ]
0.76032066
0
Creates a SnowflakeOptions from a protobuf representation of a snowflake option.
def from_proto(cls, snowflake_options_proto: DataSourceProto.SnowflakeOptions): snowflake_options = cls( database=snowflake_options_proto.database, schema=snowflake_options_proto.schema, table=snowflake_options_proto.table, query=snowflake_options_proto.query, ) return snowflake_options
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_proto(self) -> DataSourceProto.SnowflakeOptions:\n snowflake_options_proto = DataSourceProto.SnowflakeOptions(\n database=self.database,\n schema=self.schema,\n table=self.table,\n query=self.query,\n )\n\n return snowflake_options_proto", "def from_proto(cls, hive_options_proto: Any):\n\n pass", "def _pacbio_legacy_option_from_dict(d):\n warnings.warn(\n \"This is obsolete and will disappear soon\",\n DeprecationWarning)\n\n opt_id = d['pb_option']['option_id']\n name = d['pb_option']['name']\n default = d['pb_option']['default']\n desc = d['pb_option']['description']\n option_type_id = to_ascii(d['pb_option']['type'])\n\n # Hack to support \"number\"\n if option_type_id == \"number\":\n option_type_id = \"float\"\n\n return __simple_option_by_type(opt_id, name, default, desc, option_type_id)", "def to_python(self, value):\n if value is None:\n return value\n value = super(BitOptionsField, self).to_python(value)\n return BitOptions(self.options.flags, value)", "def ParseOptions(cls, options, config_object):", "def create_from_pb2(cls, pb2_obj: _BaseOptionsProto) -> 'BaseOptions':\n return BaseOptions(\n model_asset_path=pb2_obj.model_asset.file_name,\n model_asset_buffer=pb2_obj.model_asset.file_content)", "def from_proto(data_source: DataSourceProto):\n return SnowflakeSource(\n field_mapping=dict(data_source.field_mapping),\n database=data_source.snowflake_options.database,\n schema=data_source.snowflake_options.schema,\n table=data_source.snowflake_options.table,\n event_timestamp_column=data_source.event_timestamp_column,\n created_timestamp_column=data_source.created_timestamp_column,\n date_partition_column=data_source.date_partition_column,\n query=data_source.snowflake_options.query,\n )", "def from_json(cls, options_json: Text) -> 'StatsOptions':\n options_dict = json.loads(options_json)\n type_name = options_dict.pop(_TYPE_NAME_KEY, None)\n if type_name is not None and type_name != 'StatsOptions':\n raise ValueError('JSON does not encode a StatsOptions')\n if _SCHEMA_JSON_KEY in options_dict:\n options_dict['_schema'] = json_format.Parse(\n options_dict[_SCHEMA_JSON_KEY], schema_pb2.Schema())\n del options_dict[_SCHEMA_JSON_KEY]\n if _SLICING_CONFIG_JSON_KEY in options_dict:\n options_dict['_slicing_config'] = json_format.Parse(\n options_dict[_SLICING_CONFIG_JSON_KEY],\n slicing_spec_pb2.SlicingConfig())\n del options_dict[_SLICING_CONFIG_JSON_KEY]\n per_feature_weight_override_json = options_dict.get(\n _PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY)\n if per_feature_weight_override_json is not None:\n options_dict['_per_feature_weight_override'] = {\n types.FeaturePath.from_json(k): v\n for k, v in per_feature_weight_override_json.items()\n }\n del options_dict[_PER_FEATURE_WEIGHT_OVERRIDE_JSON_KEY]\n options = cls()\n options.__dict__ = options_dict\n return options", "def to_pb2(self) -> _FaceDetectorGraphOptionsProto:\n base_options_proto = self.base_options.to_pb2()\n base_options_proto.use_stream_mode = (\n False if self.running_mode == _RunningMode.IMAGE else True\n )\n return _FaceDetectorGraphOptionsProto(\n base_options=base_options_proto,\n min_detection_confidence=self.min_detection_confidence,\n min_suppression_threshold=self.min_suppression_threshold,\n )", "def initialize_options(self):\n self.proto_path = \"oef-core-protocol\"", "def from_internal_dict(cls, params):\n options = cls({}) # basic default options\n opt_dict = options.__dict__\n\n for key, val in opt_dict.items():\n options.__dict__[key] = params.get(key, val)\n\n return options", "def 
loads(text):\n values = {}\n for line in text.splitlines():\n line = line.strip()\n if not line or line.startswith(\"#\"):\n continue\n name, value = line.split(\"=\", 1)\n values[name] = value\n return Options(options_values=values)", "def tcp_pkt_parse_options(options: bytes)->dict:\n i = 0\n options_dict = dict()\n opts_rev_mapping = enum_value_to_enum(TCPOptions)\n while i < len(options):\n prefix = options[i]\n i += 1\n if prefix == dpkt.tcp.TCP_OPT_EOL or prefix == dpkt.tcp.TCP_OPT_NOP:\n options_dict[opts_rev_mapping[prefix]] = (0, None)\n else:\n if i < len(options):\n opt_len = options[i]\n i += 1\n if prefix in opts_rev_mapping.keys():\n options_dict[opts_rev_mapping[prefix]] = (opt_len, options[i:i + opt_len - 2])\n else:\n options_dict[UNKNOWN_OPTION_PREFIX + str(prefix)] = (opt_len, options[i:i + opt_len - 2])\n i += opt_len - 2\n else:\n break\n return options_dict", "def from_caffe_solver_protoxt(cls, caffe_solver_prototxt_file: Path):\n solver_param = caffe_pb2.SolverParameter()\n with open(caffe_solver_prototxt_file, 'rt') as f:\n pb2.text_format.Merge(f.read(), solver_param)\n dictionary = {'lr_policy': solver_param.lr_policy,\n 'base_lr': solver_param.base_lr,\n 'gamma': solver_param.gamma,\n 'momentum': solver_param.momentum,\n 'max_iter': solver_param.max_iter,\n 'stepsize': solver_param.stepsize,\n 'stepvalues': solver_param.stepvalue,\n 'weight_decay': solver_param.weight_decay,\n 'iter_size': solver_param.iter_size,\n 'from_prototxt': caffe_solver_prototxt_file}\n return cls(**dictionary)", "def _create_options(self):\n self._OPTIONS = {}", "def from_json(cls, value: str, options: Set[Option] = None):\n doc_raw: dict = json.loads(value)\n return cls.deserialize(doc_raw, options)", "def deconstruct(self):\n name, path, args, kwargs = super(SimpleBitOptionsField,\n self).deconstruct()\n if kwargs['default'] == self.options.maximum_value:\n del kwargs['default']\n kwargs['options'] = self.options.flags\n return name, path, args, kwargs", "def __init__(self):\n super(t_var_size_Options, self).__init__()\n self.options = {\n t_var_size_Options.BOARD_ID : {'value' : '', 'name' : 'board_id' },\n t_var_size_Options.CURRENT_STATE : {'value' : '', 'name' : 'state' },\n t_var_size_Options.PATTERN_WAVE : {'value' : '', 'name' : 'pat_wav' }\n }", "def decode_option(as_bytes: typing.List[int], inner_cl_type: CLType):\n is_defined, rem_bytes = bool(as_bytes[0]), as_bytes[1:]\n\n return decode(inner_cl_type, rem_bytes) if is_defined else None", "def from_dict(cls, dikt) -> 'EavDataAttributeOptionInterface':\n return deserialize_model(dikt, cls)", "def from_json(cls, json_text: Text):\n options = SampleOptions(**json.loads(json_text))\n # JSON parsing produces lists rather than tuples for JSON arrays. 
Convert\n # these elements into tuples for immutability\n for k, v in options._asdict().items():\n if isinstance(v, list):\n options = options._replace(**{k: tuple(v)})\n return options", "def clean_and_validate_options(self):\n options = self.options\n\n id = options.get('id', None)\n assert(isinstance(id, str) or id is None)\n options['id'] = id\n\n name = options.get('name', None)\n assert(isinstance(name, str) or name is None)\n options['name'] = name\n\n version = options.get('version', None)\n assert(isinstance(version, str) or version is None)\n options['version'] = version", "def options(self, **kwds):\n opts = dict(self.opts)\n for k in kwds:\n try:\n # Ensure that the key exists because we want to change\n # existing options, not add new ones.\n _ = opts[k]\n except KeyError:\n raise ValueError(\"invalid option {!r}\".format(k))\n opts[k] = kwds[k]\n return type(self)(self.cls, opts, self.kwargs)", "def pacbio_option_from_dict(d):\n # This should probably be pushed into pbcommand/pb_io/* for consistency\n # Extensions are supported by adding a dispatch method by looking for\n # required key(s) in the dict.\n if \"choices\" in d and d.get('choices') is not None:\n # the None check is for the TCs that are non-choice based models, but\n # were written with \"choices\" key\n return _pacbio_choice_option_from_dict(d)\n else:\n return _pacbio_option_from_dict(d)", "def parse_options(self, options):\n pass", "def optionxform(self, optionstr):\r\n return optionstr", "def optionxform(self, optionstr):\r\n return optionstr", "def __init__(self, optv):\n self.__p4optv = optv\n # Treat '-g' like '-G' except the marshal'ed Python dicts\n # will be unmarshal'ed.\n if '-g' in self.__p4optv:\n self.__p4optv[self.__p4optv.index('-g')] = '-G'\n self.__unmarshal = 1\n else:\n self.__unmarshal = 0\n # Drop '-s'. 
'p4' implements this on the client side and so\n # should 'px' (XXX though it does not yet), so the option should\n # not be passed to the server.\n if '-s' in self.__p4optv:\n self.__p4optv.remove('-s')\n log.warn(\"dropping '-s' option, px cannot yet handle it\")\n _ListCmd.__init__(self)", "def FromProto(cls, proto_obj):\n source = GameSource()\n source.type = proto_obj.type\n if proto_obj.update_time_utc_str:\n source.update_date_time = datetime.strptime(\n proto_obj.update_time_utc_str, tweets.DATE_PARSE_FMT_STR)\n else:\n source.update_date_time = datetime.now()\n if proto_obj.twitter_account:\n source.account_id = long(proto_obj.twitter_account.id_str)\n source.tweet_text = proto_obj.tweet_text\n if proto_obj.score_reporter_url:\n source.score_reporter_url = proto_obj.score_reporter_url\n if not (source.account_id or source.score_reporter_url):\n raise GameModelError('Converting GameSource from malformed proto')\n return source", "def set_options(self, packet, field, value):\n base, option = field.split(\"-\")\n assert base == \"options\", \"Must use an options field with set_options\"\n\n option_type = self.option_str_to_int(option)\n if type(value) == str:\n # Prepare the value for storage in the packet\n value = binascii.unhexlify(value)\n\n # Scapy requires these options to be a tuple - since evaling this\n # is not yet supported, for now, SAck will always be an empty tuple\n if option in [\"sack\"]:\n value = ()\n # These options must be set as integers - if they didn't exist, they can\n # be added like this\n if option in [\"timestamp\", \"mss\", \"wscale\", \"altchksum\", \"uto\"] and not value:\n value = 0\n i = 0\n # First, check if the option is already present in the packet\n for option in self.layer.options:\n # Scapy may try to be helpful and return the string of the option\n next_option = self.option_str_to_int(option[0])\n\n if option_type == next_option:\n packet[\"TCP\"].options[i] = self.format_option(option_type, value)\n break\n i += 1\n # If we didn't break, the option doesn't exist in the packet currently.\n else:\n old_options_array = packet[\"TCP\"].options\n old_options_array.append(self.format_option(option_type, value))\n packet[\"TCP\"].options = old_options_array\n\n # Let scapy recalculate the required values\n del self.layer.chksum\n del self.layer.dataofs\n if packet.haslayer(\"IP\"):\n del packet[\"IP\"].chksum\n del packet[\"IP\"].len\n return True" ]
[ "0.72627205", "0.67112297", "0.6254082", "0.5537946", "0.5431298", "0.54273605", "0.5401342", "0.53541476", "0.53435814", "0.5294465", "0.523745", "0.5237094", "0.5193706", "0.5100491", "0.5098164", "0.50886667", "0.50617826", "0.5013392", "0.50065786", "0.49748728", "0.49595988", "0.4942827", "0.4937329", "0.4932765", "0.49071205", "0.49011543", "0.49011543", "0.4896274", "0.48960942", "0.4892244" ]
0.8055073
0
Converts an SnowflakeOptionsProto object to its protobuf representation.
def to_proto(self) -> DataSourceProto.SnowflakeOptions: snowflake_options_proto = DataSourceProto.SnowflakeOptions( database=self.database, schema=self.schema, table=self.table, query=self.query, ) return snowflake_options_proto
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_proto(cls, snowflake_options_proto: DataSourceProto.SnowflakeOptions):\n snowflake_options = cls(\n database=snowflake_options_proto.database,\n schema=snowflake_options_proto.schema,\n table=snowflake_options_proto.table,\n query=snowflake_options_proto.query,\n )\n\n return snowflake_options", "def to_pb2(self) -> _FaceDetectorGraphOptionsProto:\n base_options_proto = self.base_options.to_pb2()\n base_options_proto.use_stream_mode = (\n False if self.running_mode == _RunningMode.IMAGE else True\n )\n return _FaceDetectorGraphOptionsProto(\n base_options=base_options_proto,\n min_detection_confidence=self.min_detection_confidence,\n min_suppression_threshold=self.min_suppression_threshold,\n )", "def to_proto(self):\n prototxt = str()\n opts = self.options('solver')\n for opt in opts:\n val = self.get('solver',opt)\n prototxt += opt + ': ' + val + '\\n'\n return prototxt", "def to_proto(self) -> None:\n\n pass", "def to_protobuf(self):\n self._validate()\n kwargs = {k: _convert(getattr(self, k), 'to_protobuf')\n for k in self._get_params()}\n return self._protobuf_cls(**kwargs)", "def to_pb2(self) -> _BaseOptionsProto:\n if self.model_asset_path is not None:\n full_path = os.path.abspath(self.model_asset_path)\n else:\n full_path = None\n\n return _BaseOptionsProto(\n model_asset=_ExternalFileProto(\n file_name=full_path, file_content=self.model_asset_buffer))", "def from_proto(cls, hive_options_proto: Any):\n\n pass", "def to_python(self, value):\n if value is None:\n return value\n value = super(BitOptionsField, self).to_python(value)\n return BitOptions(self.options.flags, value)", "def _proto_to_string(self, p: google.protobuf.message.Message) -> str:\n return text_format.MessageToString(p, as_one_line=True)", "def setConfigProtoBytes(self, v):\n return self._set(configProtoBytes=v)", "def _stringify_proto(obj):\n return obj.SerializeToString()", "def to_proto(self) -> DataSourceProto:\n data_source_proto = DataSourceProto(\n type=DataSourceProto.BATCH_SNOWFLAKE,\n field_mapping=self.field_mapping,\n snowflake_options=self.snowflake_options.to_proto(),\n )\n\n data_source_proto.event_timestamp_column = self.event_timestamp_column\n data_source_proto.created_timestamp_column = self.created_timestamp_column\n data_source_proto.date_partition_column = self.date_partition_column\n\n return data_source_proto", "def initialize_options(self):\n self.proto_path = \"oef-core-protocol\"", "def to_proto(self):\n proto = bounding_box_pb2.BoundingBox()\n proto.start.CopyFrom(geom_utils.ToVector3j(self.start))\n proto.size.CopyFrom(geom_utils.ToVector3j(self.size))\n return proto", "def _object2proto(self) -> UpdateGroupMessage_PB:\n return UpdateGroupMessage_PB(\n msg_id=serialize(self.id),\n address=serialize(self.address),\n content=json.dumps(self.content),\n reply_to=serialize(self.reply_to),\n )", "def to_proto(self) -> FeatureSetReferenceProto:\n return self.proto", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def 
setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)", "def setConfigProtoBytes(self, b):\n return self._set(configProtoBytes=b)" ]
[ "0.7185293", "0.6591346", "0.63469636", "0.6190279", "0.609588", "0.6037695", "0.59587693", "0.57750213", "0.574301", "0.5736975", "0.5731854", "0.5646835", "0.5621101", "0.55236673", "0.5520179", "0.54465526", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715", "0.53726715" ]
0.7964391
0
Given a dict of lang>names, return a default one
def primary_name(names): langs = names.keys() if 'en' in langs: return names['en'] return names[langs[0]]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def localizedWithFallback(field, allowEmpty=True):\n for lang in [''] + FallbackLanguages():\n t = field[lang]\n if allowEmpty:\n if isinstance(t, basestring):\n return t\n elif t:\n return t\n return u\"\"", "def fallback_trans(x):\r\n t = _(x)\r\n if t == x:\r\n l = h.get_lang()\r\n h.set_lang('en', graceful_fail = True)\r\n t = _(x)\r\n if l and l[0] != 'en':\r\n h.set_lang(l[0])\r\n return t", "def get_language(lang_code) -> str:\n langs = defaultdict(lambda: \"en\", {\"ru\": \"ru\"})\n return langs[lang_code.split(\"-\")[0]] if lang_code else \"en\"", "def test_default_translations(self):\n\t\t\n\t\tself.assertTrue(data.get_default_translation('Catholicism', 3) == 'DRA')\n\t\tself.assertTrue(data.get_default_translation('Christianity', 3) == 'ESV')", "def get_default_lang_slug(instance):\n try:\n default_language = settings.LANGUAGES[0][0]\n slug_name = 'slug_%s' % default_language\n return getattr(instance, slug_name, '')\n\n except Exception:\n return ''", "def find(lang):\n try:\n return as_dict(pycountry.languages.lookup(lang))\n except LookupError:\n return {}", "def get_lang(ix):\n\tlang = None\n\tif ix == 0:\n\t\tlang = setting.TLA_ENG\n\telif ix == 1:\n\t\tlang = setting.TLA_JP\n\telse:\n\t\tlang = setting.TLA_VN\n\n\tf = open (f\"lang\\\\{lang}.json\", encoding=setting.TLA_UTF8)\n\tglobal data_json\n\tdata_json = json.load(f)\n\n\treturn lang", "def get_dictionary_default(path):\n if path in defaults_dict.keys():\n return defaults_dict[path]\n else:\n return ''", "def get_default_variant(variants):\n for variant in variants:\n if variant.default:\n return variant", "def test_defaultdict_config():\n lang_configs = defaultdict(lambda: dict(processors=\"tokenize\"))\n run_multilingual_pipeline(en_has_dependencies=False, fr_has_dependencies=False, lang_configs=lang_configs)\n\n lang_configs = defaultdict(lambda: dict(processors=\"tokenize\"))\n lang_configs[\"en\"] = {\"processors\": \"tokenize,pos,lemma,depparse\"}\n run_multilingual_pipeline(en_has_dependencies=True, fr_has_dependencies=False, lang_configs=lang_configs)", "def init_translations():\n if \"@lang\" in input.load_input():\n lang = input.get_lang()\n try:\n trad = gettext.GNUTranslations(open(\"../course/common_student/$i18n/\" + lang + \".mo\", \"rb\"))\n except FileNotFoundError:\n trad = gettext.NullTranslations()\n trad.install()\n return lang\n trad = gettext.NullTranslations()\n trad.install()\n return \"en\"", "def default_locale(category: str | None = None, aliases: Mapping[str, str] = LOCALE_ALIASES) -> str | None:\n varnames = (category, 'LANGUAGE', 'LC_ALL', 'LC_CTYPE', 'LANG')\n for name in filter(None, varnames):\n locale = os.getenv(name)\n if locale:\n if name == 'LANGUAGE' and ':' in locale:\n # the LANGUAGE variable may contain a colon-separated list of\n # language codes; we just pick the language on the list\n locale = locale.split(':')[0]\n if locale.split('.')[0] in ('C', 'POSIX'):\n locale = 'en_US_POSIX'\n elif aliases and locale in aliases:\n locale = aliases[locale]\n try:\n return get_locale_identifier(parse_locale(locale))\n except ValueError:\n pass\n return None", "def default(cls, category: str | None = None, aliases: Mapping[str, str] = LOCALE_ALIASES) -> Locale:\n # XXX: use likely subtag expansion here instead of the\n # aliases dictionary.\n locale_string = default_locale(category, aliases=aliases)\n return cls.parse(locale_string)", "def _get_lang(self, *args, **kwargs):\n if \"lang\" in kwargs:\n if kwargs[\"lang\"] in self._available_languages:\n self.lang = kwargs[\"lang\"]", 
"def get_key_recursive(lang_map, lang_code, key_name, default=None):\n key_val = lang_map.get(lang_code, {}).get(key_name, sentinel)\n\n if key_val is not sentinel:\n return key_val\n\n parts = lang_code.split('_')\n parts.pop()\n if not parts:\n return default\n\n _lang_code = '_'.join(parts)\n return get_key_recursive(lang_map, _lang_code, key_name, default)", "def get_default(self, name):\n rargs = [_ for _ in reversed(self.args)]\n rdefaults = [_ for _ in reversed(self.defaults)]\n return rdefaults[rargs.index(name)]", "def defaultLanguage(self, lang=None):\n if(lang is not None):\n self.lang = lang\n return self.lang", "def to_language(arg: str) -> Tuple[Union[str, None], str]: \n if (low:= arg.lower()) in LANGUAGES:\n return arg\n else:\n return LANGCODES.get(low, None)", "def get_localized_name(name):\n locale = \"{}_{}\".format(\n name[\"preferredLocale\"][\"language\"],\n name[\"preferredLocale\"][\"country\"]\n )\n return name['localized'].get(locale, '')", "def get_full_dict(lang):\n\tif not lang:\n\t\treturn {}\n\t# found in local, return!\n\tif getattr(frappe.local, 'lang_full_dict', None) and frappe.local.lang_full_dict.get(lang, None):\n\t\treturn frappe.local.lang_full_dict\n\n\tfrappe.local.lang_full_dict = load_lang(lang)\n\n\treturn frappe.local.lang_full_dict", "def _try_to_get_an_english_value(self, localized_values):\n if not localized_values:\n return None\n\n for localized_value in localized_values:\n if localized_value.language in self.ENGLISH_LANGUAGE_CODES:\n return localized_value.value\n\n return first_or_default(localized_values).value", "def i18n_to_eng(string, map):\r\n\r\n return map.get(string, None)", "def get_word(key: str, language: str):\n if key not in word_keys:\n return \"LOCALIZATION KEY {} NOT FOUND FOR LANGUAGE {}\".format(key, language)\n words = word_keys[key]\n\n # If the word doesn't exist, just show word in English\n if language not in words or words[language] == \"\":\n return words[EN]\n else:\n return words[language]", "def get_language(self, word, lang=None):\n lang = lang or self.cfg.get('lang', 'en')\n # let's retrieve the word from configuration dict.\n try:\n return self.cfg['words_' + lang][word]\n except StandardError:\n return 'Do not know how to \"{}\" in \"{}\"'.format(word, lang)", "def getorelse(self, name, default=None):\n try:\n return self._defaults[name]\n except KeyError:\n return default", "def get_default_language():\n return getattr(thread_locals, 'DEFAULT_LANGUAGE',\n settings.DEFAULT_LANGUAGE)", "def guess_language(lang_list=None):\n\tlang_codes = frappe.request.accept_languages.values()\n\tif not lang_codes:\n\t\treturn frappe.local.lang\n\n\tguess = None\n\tif not lang_list:\n\t\tlang_list = get_all_languages() or []\n\n\tfor l in lang_codes:\n\t\tcode = l.strip()\n\t\tif not isinstance(code, text_type):\n\t\t\tcode = text_type(code, 'utf-8')\n\t\tif code in lang_list or code == \"en\":\n\t\t\tguess = code\n\t\t\tbreak\n\n\t\t# check if parent language (pt) is setup, if variant (pt-BR)\n\t\tif \"-\" in code:\n\t\t\tcode = code.split(\"-\")[0]\n\t\t\tif code in lang_list:\n\t\t\t\tguess = code\n\t\t\t\tbreak\n\n\treturn guess or frappe.local.lang", "def test_find_default(self):\n mute_map = MutableMap(**VALUE)\n\n assert mute_map.find('NOT_VALID', 'default_val') == \\\n 'default_val', 'default should be used'\n assert mute_map.find('str_val', 'default_val') == \\\n VALUE['str_val'], 'default should be ignored'", "def gpwDefaultLanguage(self):\n parent = self.getFolderWhenPortalFactory()\n if hasattr(parent, 
'getRawLanguage') and parent.getRawLanguage():\n return parent.getRawLanguage()\n tool = getToolByName(self, 'portal_languages', None)\n if tool is not None:\n return tool.getDefaultLanguage()\n return config.LANGUAGE_DEFAULT", "def getDefault():" ]
[ "0.60933506", "0.60726446", "0.6008112", "0.5990976", "0.59868455", "0.5985267", "0.5922496", "0.590694", "0.58639705", "0.5810866", "0.5780663", "0.5768472", "0.57512575", "0.5717193", "0.5704346", "0.5672763", "0.5649485", "0.56342536", "0.563318", "0.5624843", "0.5548256", "0.5538525", "0.55365795", "0.55344003", "0.55340284", "0.54796356", "0.5478518", "0.5471905", "0.54464394", "0.5413185" ]
0.6597497
0
Initializes an instance of the InstagramBot class.
def __init__(self, username = None, password = None): self.username = config['AUTH']['USERNAME'] self.password = config['AUTH']['PASSWORD'] self.login = config['URL']['LOGIN'] self.nav_url = config['URL']['NAV'] self.tag_url = config['URL']['TAGS'] self.direct_url = config['URL']['DM'] self.driver = webdriver.Chrome(config['ENVIRONMENT']['CHROMEDRIVER']) self.stay_logged = False self.api = InstagramAPI(self.username, self.password)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def start(self):\r\n self._instagram_api = InstagramAPI(mongo_api=self._mongo_api)\r\n self._inst_run()", "def __init__(self, bot=BNBot):\n self.bot = bot", "def __init__(self, client_id=None, access_token=None):\r\n if not client_id and not access_token:\r\n raise TypeError('__init__() must be passed at least one '\r\n 'of client_id, access_token')\r\n\r\n self.apiroot = 'https://api.instagram.com/v1'\r\n\r\n self.client_id = client_id\r\n self.access_token = access_token\r\n self.add_filter(self.add_authorization)", "def __init__(self, bot):\n self.bot = bot", "def __init__(self, bot):\n self.bot = bot", "def instagram(self, instagram):\n\n self._instagram = instagram", "def __init__(self, bot: DreamBot) -> None:\n\n self.bot = bot", "def __init__(self, bot: DreamBot) -> None:\n\n self.bot = bot", "def __init__(self, username, password, bot, channel):\n super().__init__(username, password)\n\n self.queue = deque()\n self.ingame_cog = Ingame(bot)\n\n self.bot = bot\n self.channel = channel\n self.chat_breakout = False\n self.loop = asyncio.get_event_loop()\n self.ingame_cog.is_pycraft_instance = True", "def __init__(self, mongo_api, cnn_model):\r\n self._mongo_api = mongo_api\r\n self._cnn_model = cnn_model\r\n\r\n self._instagram_api = None", "def __init__(self, config):\n self._slack_client = self._connect(config[\"slack_bot_token\"])\n self.bot_id = self._get_user_id()\n self.default_channel = config[\"default_channel\"]", "def __init__(self):\n self.site = pywikibot.Site(u'commons', u'commons')\n self.generator = self.getGenerator()", "def __init__(self):\n\n # configure logging\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n logging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n self.logger = logging.getLogger(\"chaturbate\")\n self.logger.setLevel(logging.DEBUG)\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n formatter = logging.Formatter(\"%(asctime)s %(levelname)s %(message)s\",\n \"%Y-%m-%d %H:%M:%S\")\n console_handler.setFormatter(formatter)\n self.logger.addHandler(console_handler)\n\n # read configuration\n self.config_parser = ConfigParser.ConfigParser()\n self.config_parser.read(\"config.ini\")\n\n # is pushbullet is enabled on the config\n if self.config_parser.get('PushBullet', 'enable') == 'true':\n # try to import it and connect\n try:\n import pushbullet\n self.push_bullet = pushbullet.Pushbullet(\n self.config_parser.get('PushBullet', 'access_token'))\n except (ImportError, pushbullet.InvalidKeyError):\n self.push_bullet = None\n\n # create a requests object that has sessions\n self.req = requests.Session()\n\n self.username = self.config_parser.get('User', 'username')\n self.password = self.config_parser.get('User', 'password')", "def initialize(self):\n self.voteskips = []\n self.response = {}\n self.route = {}\n self.userlist = []\n self.poll = []\n self.media = []\n self.init = False\n self.question = None\n self.jumble = None\n self.imgur = None", "def __init__(self, address=('', 50000), authkey=b'tradingbot'):\n _ClientBot.__init__(self, address=address, authkey=authkey)", "def __init__(self):\n auth = tweepy.OAuthHandler(consumer_key, consumer_secret)\n auth.set_access_token(access_token, access_token_secret)\n self.api = tweepy.API(auth)", "def __init__(self):\n\n # This environment variable should be set before using the bot\n self.token = os.environ['STATS_BOT_TOKEN']\n\n\n # These will be checked against as substrings within each\n # message, so different variations are not required if their\n # 
radix is present (e.g. \"all\" covers \"/all\" and \"ball\")\n self.menu_trigger = ['/all', '/stats']\n self.loan_stats_trigger = ['/loans']\n self.il_trigger = ['/IL']\n self.assets_trigger = ['/assets']\n\n\n # Stops runtime if the token has not been set\n if self.token is None:\n raise RuntimeError(\n \"FATAL: No token was found. \" + \\\n \"You might need to specify one or more environment variables.\")\n\n # Configures logging in debug level to check for errors\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',\n level=logging.INFO)", "def __init__(self):\n log.msg(\"Initializing Twitch parser.\")\n\n # initialize our data members\n self.streams = tuple()\n self.crc32 = 0", "def __init__(self):\n super().__init__()\n\n # Will only reply to every 3rd or so tweet, defined in settings\n self.received_tweet_count = 0\n\n # Twitter api init\n self.auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\n self.auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)\n self.twitter_api = tweepy.API(self.auth)\n\n print('Authenticated, creating stream...')\n\n self._init_stream()", "def __init__(self):\n\n self.db = ImageDB()\n self.vitess = VitessConn()\n self.minio = MinioConn()", "def __init__(self, imag_bot, block_, player_loc):\n self._bot = imag_bot\n self._block = block_\n self._player_loc = player_loc", "def __init__(self) -> None:\n super().__init__()\n self.handler = IxnHandler()", "def init(self, sevabot):\n\n logger.debug(\"GiantbombHandler init\")\n self.sevabot = sevabot\n self.skype = sevabot.getSkype()", "def init_bot():\n\n # We create the Reddit instance.\n reddit = praw.Reddit(client_id=config.APP_ID, client_secret=config.APP_SECRET,\n user_agent=config.USER_AGENT, username=config.REDDIT_USERNAME,\n password=config.REDDIT_PASSWORD)\n\n # Check if we have the 3 required arguments.\n if len(sys.argv) == 3:\n\n method = sys.argv[1]\n day = sys.argv[2]\n\n if method == \"sticky\":\n if day == \"monday\":\n post_monday(reddit)\n elif day == \"wednesday\":\n post_wednesday(reddit)\n elif day == \"friday\":\n post_friday(reddit)\n\n elif method == \"unsticky\":\n if day == \"monday\":\n unsticky_post(reddit, MONDAY_FILE)\n elif day == \"wednesday\":\n unsticky_post(reddit, WEDNESDAY_FILE)\n elif day == \"friday\":\n unsticky_post(reddit, FRIDAY_FILE)", "def __init__(self):\n self.emotions_list = EmotionsList('NRC-Emotion-Intensity-Lexicon-v1.txt')\n self.tweets_list = None\n self.nickname = None", "def __init__(self, bot: BunkBot, channels: ChannelService):\r\n self.bot: BunkBot = bot\r\n self.message: Message = None\r\n self.channels: ChannelService = channels\r\n self.yt_result: YoutubeResult = YoutubeResult()\r\n self.yt_link: str = \"\"", "def __init__(self, bot: commands.Bot):\n\n super().__init__(bot)\n\n # Init instance vars\n self.cookie_data = self._parse_cookie_data()\n self.cookie_available = False\n self.cookie_prepared_timestamp = None\n self.cookie_drop_delay_hours = None\n self.cookie_drop_delay_minutes = None\n self.cookie_type = None", "def __init__(self, config):\n self.config = config\n\n self.slack_client = SlackClient(self.config.SLACK_TOKEN)", "def __init__(self, *, specified_loop=None):\n intents = discord.Intents(\n members=True,\n presences=True,\n guilds=True,\n emojis=True,\n invites=True,\n messages=True,\n reactions=True,\n voice_states=True,\n )\n loop = asyncio.get_event_loop()\n session = aiohttp.ClientSession(loop=loop)\n\n # Load all the environment variables\n load_dotenv(\"config/Bot/token.env\")\n 
load_dotenv(\"config/Apis/tokens.env\")\n load_dotenv(\"config/Database/db.env\")\n\n # Read the emoji file\n self.emoji_config = CustomEmojis.from_json(read_file(\"config/General/emojis.json\"))\n # Read the config file\n self.config = Config.from_json(read_file(\"config/General/config.json\"))\n\n # Set the HTTPException error codes dict to a custom property for easy access\n self.httpexception_codes = load_json(\"assets/data/httpexception_codes.json\", make_keys_int=True)\n\n # We save the bot start time to a variable\n self.started_at = datetime.datetime.utcnow()\n\n # APIs\n self.cleverbot = async_cleverbot.Cleverbot(\n os.environ[\"cleverbot\"],\n session=session,\n context=async_cleverbot.DictContext(),\n )\n self.dagpi = asyncdagpi.Client(os.environ[\"dagpi\"])\n self.google_api = async_cse.Search(os.environ[\"google_search\"], session=session)\n self.translate_api = aiogoogletrans.Translator()\n self.aki = Akinator()\n self.apis = [\"OMDB\", \"tenor\", \"owlbot\", \"gender_api\", \"nasa\"]\n self.api_keys = {api: os.environ[api.lower()] for api in self.apis}\n\n # For the snipe command\n self.snipes = {}\n\n # For tracking commands\n self.command_uses = {}\n\n # For api requests\n self.session = session\n\n super().__init__(\n command_prefix=get_prefix,\n case_insensitive=True,\n intents=intents,\n session=session,\n loop=specified_loop or loop,\n strip_after_prefix=True,\n owner_ids=self.config.owner_ids,\n )\n\n # For before_invoke\n self._before_invoke = self.before_invoke\n # For blacklisted check\n self._checks.append(self.bot_check)", "def init(self) -> None:\n ..." ]
[ "0.7040264", "0.6685926", "0.66698956", "0.6649861", "0.6649861", "0.6602354", "0.6402114", "0.6402114", "0.62316155", "0.614077", "0.6138122", "0.60492367", "0.60134923", "0.5993596", "0.59222096", "0.59110945", "0.5900702", "0.58524567", "0.5842189", "0.5841665", "0.58260006", "0.5819154", "0.5818025", "0.57928646", "0.57815546", "0.57677644", "0.5766395", "0.5744598", "0.5727863", "0.57224864" ]
0.75979745
0
Method gets a list of users who like a post
def get_likes_list(self, username): api = self.api api.searchUsername(username) result = api.LastJson username_id = result['user']['pk'] #Gets the user ID user_posts = api.getUserFeed(username_id) # gets the user feed result = api.LastJson media_id = result['items'][0]['id'] #gets the most recent post api.getMediaLikers(media_id) #gets users who liked users = api.LastJson('users') for user in users: #appends the users to the list users_list.append({'pk':user['pk'], 'username':user['username']})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_user_likes(self, data_base):\n cursor = data_base.cursor(dictionary=True)\n cursor.execute(f\"SELECT user_id FROM user_like WHERE post_id = {self.id}\")\n user_likes = tuple(map(lambda x: str(x['user_id']), cursor.fetchall()))\n if not user_likes:\n return []\n cursor.execute(f\"SELECT username FROM user WHERE id IN ({', '.join(user_likes)})\")\n users = cursor.fetchall()\n cursor.close()\n return list(map(lambda x: x['username'], users))", "def get_all_likes(obj):\n\t\tobj_type = ContentType.objects.get_for_model(obj)\n\t\treturn User.objects.filter(\n\t\t\tlikes_content_type=obj_type, likes_object_id=obj.id)", "def get_users_from_likes(self, type, owner_id, item_id):\n likes = self.vk.likes.getList(type=type, owner_id=owner_id, item_id=item_id, count=1)\n likes = self.vk.likes.getList(type=type, owner_id=owner_id, item_id=item_id, count=likes['count'])\n return likes['items']", "def like_user_posts(self, user:str, n_posts:int, like:bool=True):\n\n action = 'Like' if like else 'Unlike'\n\n self._nav_user(user)\n\n imgs = []\n elements = self._find_element(EC.presence_of_all_elements_located((By.CLASS_NAME, '_9AhH0')))\n imgs.extend(elements)\n\n for img in imgs[:n_posts]:\n img.click() \n time.sleep(1) \n try:\n self.driver.find_element_by_xpath(\"//*[@aria-label='{}']\".format(action)).click()\n except Exception as e:\n LOGGER.error(e)\n\n self.driver.find_elements_by_class_name('ckWGn')[0].click()", "def get_meals_user_liked(username):\n meals_user_liked = []\n user_liked = Rating.objects.filter(member__username=username, like=True)\n for ratting in user_liked:\n meals_user_liked.append(ratting.meal)\n return meals_user_liked", "def get_user_posts(self, request):\n post_objects = Post.objects.filter(liked_users__id=request.user.id)\n avg_user_liked_post_weight = self.__avg_user_interested_post_weight(post_objects)\n queryset = self.__user_interested_post_filter(avg_user_liked_post_weight)\n context = {'user':request.user}\n serializer = PostSerializer(queryset, many=True, context=context)\n return Response({'data': serializer.data}, status=status.HTTP_200_OK)", "def latest_likes(self, user, number_posts, likes):\n WAIT = 1\n if likes:\n action = 'Like'\n else:\n action = 'Unlike'\n self.nav_user(user)\n image_container = []\n image_container.extend(self.driver.find_elements_by_class_name('_9AhH0'))\n for image in image_container[:number_posts]:\n image.click()\n time.sleep(WAIT)\n try:\n self.driver.find_element_by_xpath(\"//*[@aria-label='{}']\".format(action).click())\n except Exception as e:\n print(e)\n self.driver.find_elements_by_xpath('/html/body/div[4]/div[2]/div/article/div[3]/section[1]/span[1]/button')[0].click() # clicks the heart symbol\n time.sleep(WAIT)\n self.driver.find_elements_by_xpath('/html/body/div[4]/div[3]/button')[0].click() #Makes sure to close out of current picture\n time.sleep(WAIT)\n \n # Tested\n users_list = []\n def get_likes_list(self, username):\n \"\"\"\n Method gets a list of users who like a post\n\n \"\"\"\n api = self.api\n api.searchUsername(username) \n result = api.LastJson\n username_id = result['user']['pk'] #Gets the user ID\n user_posts = api.getUserFeed(username_id) # gets the user feed\n result = api.LastJson\n media_id = result['items'][0]['id'] #gets the most recent post\n api.getMediaLikers(media_id) #gets users who liked\n users = api.LastJson('users')\n for user in users: #appends the users to the list\n users_list.append({'pk':user['pk'], 'username':user['username']})", "def get_users_who_liked_object(*, obj: 'Model'):\n 
ct = ContentType.objects.get_for_model(obj)\n\n return (\n User.objects\n .filter(\n likes__content_type=ct,\n likes__object_id=obj.pk\n )\n )", "def get_followers_likes(self, followers_likes):\n user_followers = []\n\n followers = self.get_new_followers()\n if len(followers) < followers_likes:\n user_followers = self.get_followers()\n self.self_followers = deepcopy(user_followers)\n user_followers = [i['pk'] for i in user_followers.get('users', []) if i['pk'] not in followers]\n\n if user_followers:\n if len(user_followers) > followers_likes - len(followers):\n followers.extend([random.choice(user_followers) for _ in range(followers_likes - len(followers))])\n else:\n followers.extend(user_followers)\n else:\n followers = [random.choice(followers) for _ in range(followers_likes)]\n\n followers_media_ids = [self.random_user_media(i) for i in followers]\n\n if len(followers_media_ids) < followers_likes and user_followers:\n while len(followers_media_ids) < followers_likes:\n u = random.choice(user_followers)\n rm = self.random_user_media(u)\n if rm and rm not in followers_media_ids:\n followers_media_ids.append(rm)\n\n return followers_media_ids", "def is_liked(obj, user) ->bool:\n\tif not user.is_authenticated:\n\t\treturn False\n\tobj_type = ContentType.objects.get_for_model(obj):\n\tlikes = Like.objects.filter(\n\t\tcontent_type = obj_type, object_id=obj.id, user=user)\n\treturn likes.exists()\n\n\tdef get_all_likes(obj):\n\t\t\"\"\"\n\t\t\tGets all users, who liked object\n\t\t\"\"\"\n\t\tobj_type = ContentType.objects.get_for_model(obj)\n\t\treturn User.objects.filter(\n\t\t\tlikes_content_type=obj_type, likes_object_id=obj.id)", "def like(request, post_id):\n if request.method == \"PUT\":\n liked = None\n user = request.user\n post = Post.objects.get(id=post_id)\n # If user already liked, decrement the like count and remove as 'liker'\n if user in post.liked_by.all():\n post.liked_by.remove(user)\n post.likes -= 1\n post.save()\n liked = False\n # Else increase like count and add user\n else:\n post.liked_by.add(user)\n post.likes += 1\n post.save()\n liked = True\n # Return data for updating dynamically with javascript\n return JsonResponse({\"total_likes\": post.likes, \"liked\": liked})", "def get_likes(self):\n source, edge = self.id, \"likes\"\n return User.graph().get_connections(source, edge, limit=100000)[\"data\"]", "def show_likes(user_id):\n\n\n user = User.query.get_or_404(user_id)\n\n return render_template('users/likes.html', user=user)", "def get_user_liked(user, status):\n return models.Favorite.objects.filter(user=user, status=status).exists()", "def getLikeCommentInfos(self, user, listado_infos, filtrar_like):\n listado_infos_likes = []\n if filtrar_like:\n for elemento in listado_infos:\n try:\n LikeInfo.objects.get(info=elemento, usuario=user)\n info = {}\n info[\"info\"] = elemento\n info[\"like\"] = LikeInfo.objects.filter(info=elemento).count()\n info[\"comment\"]=Comments.objects.filter(page=elemento).count()\n if not user.is_authenticated():\n LikeInfo.objects.get(info=elemento, usuario=user)\n info[\"likeInfo\"] = True\n else:\n info[\"likeInfo\"] = False\n listado_infos_likes.append(info)\n except ObjectDoesNotExist, e:\n pass\n else:\n for elemento in listado_infos:\n info = {}\n info[\"info\"] = elemento\n info[\"like\"] = LikeInfo.objects.filter(info=elemento).count()\n info[\"comment\"] = Comments.objects.filter(page=elemento).count()\n try:\n if user.is_authenticated():\n LikeInfo.objects.get(info=elemento, usuario=user)\n info[\"likeInfo\"] = True\n 
else:\n info[\"likeInfo\"] = False\n except ObjectDoesNotExist, e:\n info[\"likeInfo\"] = False\n listado_infos_likes.append(info)\n return listado_infos_likes", "def likes(self):\n return self.get_queryset().filter(vote__gt=0)", "def liked_by(self, user):\n return Likes.objects.filter(recipe=self, chef=user).exists()", "def get_queryset(self):\n user: User = self.request.user\n following_users = user.profile.following.all()\n return Post.objects.filter(author__in=following_users).order_by('created')", "def get_user_has_liked(self, instance):\n request = self.context.get(\"request\")\n return instance.likes.filter(pk=request.user.pk).exists()", "def get_user_has_liked(self, instance):\n request = self.context.get(\"request\")\n return instance.likes.filter(pk=request.user.pk).exists()", "def like(self, data_base, user):\n cursor = data_base.cursor()\n cursor.execute(f\"UPDATE post SET likes = likes + 1 WHERE id = '{self.id}'\") # Increments the likes\n cursor.execute(f\"INSERT INTO user_like (user_id, post_id) VALUES ({user.id}, {self.id})\")\n if self.commit_to_db:\n data_base.commit()\n cursor.close()", "def post(self):\n liked = self.request.get('like')\n unliked = self.request.get('unlike')\n post_id = self.request.get('post_id')\n post = Posts.get_by_id(int(post_id))\n user = self.get_active_user()\n user_id = int(user.key().id())\n\n if liked:\n if user_id in post.liked_by:\n self.render_improper_endpoint_access(\"like\")\n else:\n if post.submitter_id != user_id:\n post.liked_by.append(user.key().id())\n post.put()\n self.redirect('/%s' % str(post.key().id()))\n else:\n self.error(403)\n elif unliked:\n if user_id in post.liked_by:\n index = post.liked_by.index(user_id)\n del post.liked_by[index]\n post.put()\n self.redirect('/%s' % str(post.key().id()))\n else:\n self.error(500)", "def like_following(self):\n self.logger.log(\"starting like_following...\")\n count_following = self.account.follows_count\n follows_accounts = self.following\n random.shuffle(follows_accounts)\n for acc in follows_accounts:\n acc = perform_with_ran_delay(self.instagram.get_account_by_id, acc)\n self.logger.log(\" {} > {} posts\".format(acc.username, acc.media_count))\n if acc.media_count > 0:\n\n posts = perform_with_ran_delay(self.instagram.get_medias, acc.username, 50)\n if posts:\n for m in posts:\n try:\n perform_with_ran_delay(self.instagram.like, m.identifier)\n self.logger.log(\"liking 1 post from \"+acc.username)\n random_delay()\n except Exception as e:\n self.logger.log(\"skipping 1 post from \"+acc.username)\n self.logger.log(e)\n random_delay()\n continue", "def get_likes(self, obj):\n return QuestionPersonLike.objects.filter(question=obj,\n like=True).count()", "def users_likes(user_id):\n\n if not g.user:\n flash(\"Access unauthorized.\", \"danger\")\n return redirect(\"/\")\n\n users_blocking = [block.user_blocking_id for block in Blocks.query.all() if block.user_being_blocked_id == g.user.id]\n # likes = Message.query.filter(Message.user_id.notin_(users_blocking)).all()\n user = User.query.get_or_404(user_id)\n likes = [message for message in user.likes if message.user_id not in users_blocking]\n return render_template('users/likes.html', user=user, likes=likes)", "def like(self, request, pk=None):\n\n user_wall_post = self.get_object()\n user_wall_post.likes.add(self.request.user)\n to_user = user_wall_post.owner\n from_user = request.user\n\n UserNotification.create_post_friend_liked_notification(from_user, to_user, 'Right', id=pk)\n return Response(status=201)", "def fans(self, 
request, pk=None):\n\n obj = self.get_object()\n users_list = like_func.get_liked_users(obj)\n serializer = UserSerializer(users_list, context={'request': request}, many=True)\n return Response(serializer.data)", "def get_meals_user_disliked(username):\n meals_user_disliked = []\n user_disliked = Rating.objects.filter(member__username=username, like=False)\n for ratting in user_disliked:\n meals_user_disliked.append(ratting.meal)\n return meals_user_disliked", "def prepare_process_like_and_follow(self):\n follow = []\n media = []\n unfollow = []\n\n coef = self.users_to_follow / self.limits_per_hour.get('follow', 1)\n media_to_like = round(coef*self.limits_per_hour.get('like'))\n num_to_unfollow = round(coef*self.limits_per_hour.get('unfollow'))\n\n feed_likes = media_to_like // 2\n feed_likes_list = []\n following_likes = round((media_to_like // 2) * 3 / 4)\n following_likes_list = []\n followers_likes = media_to_like - feed_likes - following_likes\n\n monitored_ids = [i[\"user\"] for i in self.monitored_users]\n\n for posts in self.hashtag_feed_list(self.search_hashtags):\n if len(follow) < self.users_to_follow:\n for m in posts:\n if self.check_if_suit(m):\n user_id, username = self.get_user_from_post(m)\n if user_id and user_id not in [i[\"user\"] for i in follow] \\\n and user_id not in monitored_ids:\n follow.append({'user': user_id, 'username': username})\n following_likes_list.append(m)\n\n if len(follow) >= self.users_to_follow:\n break\n\n for p in following_likes_list:\n if p in posts:\n posts.remove(p)\n\n if feed_likes > 0:\n if len(posts) > feed_likes:\n feed_likes_list.extend([i['id'] for i in (random.choice(posts) for _ in range(feed_likes))\n if i['id'] not in feed_likes_list])\n else:\n feed_likes_list.extend([i['id'] for i in posts[:feed_likes] if i['id'] not in feed_likes_list])\n feed_likes -= len(feed_likes_list)\n if feed_likes <= 0:\n if len(follow) >= self.users_to_follow:\n break\n if len(follow) >= self.users_to_follow and feed_likes <= 0:\n break\n\n media.extend(feed_likes_list)\n\n if len(following_likes_list) < following_likes:\n followings = []\n get_n_followings = following_likes - len(following_likes_list)\n if following_likes_list:\n following_likes_list = [self.get_media_id_from_post(i) for i in following_likes_list]\n following_likes_list.extend([i for i in self.get_following_likes(followings, get_n_followings)\n if i and i not in media])\n media.extend(following_likes_list)\n else:\n media.extend([self.get_media_id_from_post(i) for i in following_likes_list[:following_likes]])\n\n media.extend([i for i in self.get_followers_likes(followers_likes) if i and i not in media])\n\n unfollow = self.get_to_unfollow(unfollow, num_to_unfollow)\n\n return follow, media, unfollow", "def like(request, pk):\n update_posts_expiration()\n post = 0;\n post = Post.objects.get(id=pk)\n #if the post is expired you user can't like it\n if post.is_expired == True:\n return Response(\"You can't interact with a expired post\")\n else:\n #if the post is not expired then increment the likes count by 1 and save a serializer of like to the database with user and post information\n if request.method == \"POST\":\n if request.user != post.user:\n post.likes_count += 1\n post.save()\n serializer = LikeSerializer(data=request.data)\n if serializer.is_valid():\n serializer.save(user=request.user, liked_post=post)\n return Response(\"you liked the post with title: \" + post.title)\n else:\n return Response(serializer.errors)\n else:\n return Response(\"you can't like your own post\")\n 
\n\n return Response(\"you didn't like the post yet\")" ]
[ "0.80609584", "0.7395297", "0.69443375", "0.6895322", "0.68074024", "0.67923427", "0.6753025", "0.6566023", "0.6552493", "0.63974077", "0.639381", "0.6380745", "0.6334056", "0.6326958", "0.6302191", "0.62897587", "0.62885493", "0.62881505", "0.6275099", "0.6275099", "0.6238257", "0.62229425", "0.6217055", "0.6210115", "0.62097657", "0.6204797", "0.6191636", "0.6151198", "0.61173654", "0.6113968" ]
0.76121527
1
Load sample images for image manipulation. Loads both ``china`` and ``flower``. Returns
def load_sample_images(): # Try to import imread from scipy. We do this lazily here to prevent # this module from depending on PIL. try: try: from scipy.misc import imread except ImportError: from scipy.misc.pilutil import imread except ImportError: raise ImportError("The Python Imaging Library (PIL) " "is required to load data from jpeg files") ROOT_Dir = os.getcwd() module_path = os.path.join(ROOT_Dir, "images") with open(os.path.join(module_path, 'README.txt')) as f: descr = f.read() filenames = [os.path.join(module_path, filename) for filename in os.listdir(module_path) if filename.endswith(".jpg")] # Load image data for each image in the source folder. images = [imread(filename) for filename in filenames] return Bunch(images=images, filenames=filenames, DESCR=descr)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_images(self):\r\n self.standing_frame = [load_image(\"cat1.png\")]\r\n self.walk_frames_r = [load_image(\"cat2.png\"), load_image(\"cat3.png\"),\r\n load_image(\"cat4.png\")]", "def _preload_all_samples(self):\n if self.mode in ['train_noval', 'train_with_val']:\n\n self._images_train, self._labels_train = [], []\n desc = \"Loading train image pairs & flows\"\n with tqdm(total=len(self._img_trn_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_trn_path):\n pbar.update(1)\n label_path = self._lbl_trn_path[n]\n image, label = self._load_sample(image_path, label_path)\n self._labels_train.append(label)\n self._images_train.append(image)\n\n if self.mode == 'train_with_val':\n self._images_val, self._labels_val = [], []\n desc = \"Loading val image pairs & flows\"\n with tqdm(total=len(self._img_val_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_val_path):\n pbar.update(1)\n label_path = self._lbl_val_path[n]\n image, label = self._load_sample(image_path, label_path, preprocess=False)\n self._labels_val.append(label)\n self._images_val.append(image)\n\n if self.opts['tb_test_imgs'] is True:\n self._images_test = []\n desc = \"Loading test samples\"\n with tqdm(total=len(self._img_tst_path), desc=desc, ascii=True, ncols=100) as pbar:\n for image_path in self._img_tst_path:\n pbar.update(1)\n self._images_test.append(self._load_sample(image_path, preprocess=False))\n\n elif self.mode in ['val', 'val_notrain']:\n\n self._images_val, self._labels_val = [], []\n desc = \"Loading val image pairs & flows\"\n with tqdm(total=len(self._img_val_path), desc=desc, ascii=True, ncols=100) as pbar:\n for n, image_path in enumerate(self._img_val_path):\n pbar.update(1)\n label_path = self._lbl_val_path[n]\n image, label = self._load_sample(image_path, label_path, preprocess=False)\n self._labels_val.append(label)\n self._images_val.append(image)\n\n elif self.mode == 'test':\n self._images_test = []\n desc = \"Loading test samples\"\n with tqdm(total=len(self._img_tst_path), desc=desc, ascii=True, ncols=100) as pbar:\n for image_path in self._img_tst_path:\n pbar.update(1)\n self._images_test.append(self._load_sample(image_path, preprocess=False))", "def load_sample(self):\n\n self.load_images(self.folder + \"/sampleSet.txt\")\n self.load_traces(self.folder + \"/sampleLabel.txt\")", "def load_test_images(images):\n loaded = {}\n for description, _ in images.items():\n loaded[description] = load_from_netcdf(description)\n return loaded", "def load_images(self):\n images_list = [os.path.join(self.root, image['file_name'])\n for image in self.data['images']]\n\n if self.shuffle:\n random.shuffle(images_list)\n images_list = images_list[:self.max_samples] if self.max_samples is not None and self.max_samples <= len(\n images_list) else images_list\n\n return images_list", "def load_scraped_food_images(ROOT):\n Xtr, Ytr = load_food_image_batch(os.path.join(ROOT, 'train'),50000)\n Xte, Yte = load_food_image_batch(os.path.join(ROOT, 'test'),10000)\n return Xtr, Ytr, Xte, Yte", "def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))", "def init_datasets(self, display_samples = False):\n print(\"==> Loading images from \", self.img_dir)\n self.image_data_gen = ImageDataGenerator(\n rescale=1./255,\n #rotation_range=30,\n #shear_range=30,\n #width_shift_range=.15,\n #height_shift_range=.15,\n #zoom_range=0.5,\n validation_split=0.2)\n\n self.train_dataset = 
self.image_data_gen.flow_from_directory(\n batch_size=BATCH_SIZE,\n directory=self.img_dir,\n shuffle=True,\n target_size=self.img_size,\n class_mode='sparse',\n subset='training')\n\n self.val_dataset = self.image_data_gen.flow_from_directory(\n batch_size=BATCH_SIZE,\n directory=self.img_dir,\n shuffle=True,\n target_size=self.img_size,\n class_mode='sparse',\n subset='validation')\n\n if display_samples:\n self.display_sample_images()", "def load_images(filename):\n images = _load(filename)\n #_info_image(image, title=os.path.basename(filename))\n return images", "def load_images(file):\n\timage_list = [] # List for storing all the images\n\ttargets = []\n\t\n\tfor filename in glob.glob(file + '/*.png'):\n\t\t# ==================\n\t\t# Reading the image\n\t\t# ==================\n\t\timage = scipy.misc.imread(filename).astype(np.float32)\n\t\t\n\t\t# ================================\n\t\t# Converting the image to a vector\n\t\t# ================================\n\t\timage = image.flatten() # (784, )\n\t\t\n\t\t# ==============================\n\t\t# Normalizing the image to numpy\n\t\t# ==============================\n\t\timage = image / 255.0\n\t\timage = image - 0.5\n\t\timage = image * 2.0\n\t\t\n\t\t# ===============================\n\t\t# Appending the image to the list\n\t\t# ===============================\n\t\timage_list.append(image)\n\t\t\n\t\t_, value = filename.split('\\\\')\n\t\t# print(value[0])\n\t\ttargets.append(int(value[0]))\n\t\n\timage_list = np.array(image_list)\n\ttargets = np.array(targets)\n\t\n\t# ================================================\n\t# \t\t\tShuffling the data\n\t# ================================================\n\timage_list, targets = shuffle(image_list, targets)\n\t\n\ttrain_images, test_images, train_targets, test_targets = split(image_list, targets)\n\treturn train_images, test_images, train_targets, test_targets", "def load_data():\n dirname = os.path.join('datasets', 'fashion-mnist')\n base = 'http://fashion-mnist.s3-website.eu-central-1.amazonaws.com/'\n files = [\n 'train-labels-idx1-ubyte.gz', 'train-images-idx3-ubyte.gz',\n 't10k-labels-idx1-ubyte.gz', 't10k-images-idx3-ubyte.gz'\n ]\n\n paths = []\n for fname in files:\n paths.append(get_file(fname, origin=base + fname, cache_subdir=dirname))\n\n with gzip.open(paths[0], 'rb') as lbpath:\n y_train = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[1], 'rb') as imgpath:\n x_train = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_train), 28, 28)\n\n with gzip.open(paths[2], 'rb') as lbpath:\n y_test = np.frombuffer(lbpath.read(), np.uint8, offset=8)\n\n with gzip.open(paths[3], 'rb') as imgpath:\n x_test = np.frombuffer(\n imgpath.read(), np.uint8, offset=16).reshape(len(y_test), 28, 28)\n\n return (x_train, y_train), (x_test, y_test)", "def load_images(input_dir=\"/tmp/mapswipe/project-1\", n_images=2000, seed=1):\n class_map = {1: \"1\", 0: \"5\"}\n output_dir = \"/Users/thead/git/dreamview/data/\"\n\n X_ = []\n y_ = []\n for new_klass in class_map:\n images = []\n for klass in class_map[new_klass]:\n for img in glob.glob(input_dir + \"/%s/*/*/*/aerial.jpeg\" % klass):\n if os.stat(img).st_size > 0:\n images.append(img)\n\n images = shuffle(images, random_state=seed+42+new_klass)\n images = images[:n_images]\n X_ += images\n y_ += [new_klass] * len(images)\n\n # XXX deduce array size from an actual image\n X = np.zeros((2*n_images, 256*256), dtype=np.ubyte)\n y = np.zeros(2*n_images, dtype=np.int)\n\n for n, (img_path, klass) in 
enumerate(zip(X_, y_)):\n # the order of these OPs has been chosen on purpose, don't mess\n # without checking what happens\n img = imread(img_path)\n img = equalize_adapthist(img)\n img = rgb2grey(img)\n img = img_as_ubyte(img)\n\n if not n % 10:\n fname = os.path.split(img_path)[:-1]\n fname = os.path.join(*fname, \"aerial-processed.jpeg\")\n imsave(fname, img)\n\n X[n,:] = img.ravel()\n y[n] = klass\n\n return X, y", "def load_isolated_images(Args):\n # load first galaxy images\n name = 'first_gal_band_wldeb_noise.fits'\n filename = os.path.join(out_dir, Args.model, name)\n Y1 = load_images(filename, ['i'], Args)\n # load second galaxy images\n name = 'second_gal_band_wldeb_noise.fits'\n filename = os.path.join(out_dir, Args.model, name)\n Y2 = load_images(filename, ['i'], Args)\n Y = {'Y1': Y1,\n 'Y2': Y2}\n return Y", "def three_sample_images():\n samples = samples_path()\n _truck = np.array(Image.open(os.path.join(samples, \"truck.png\")))\n _deer = np.array(Image.open(os.path.join(samples, \"deer.png\")))\n _frog = np.array(Image.open(os.path.join(samples, \"frog.png\")))\n truck = transforms.ToTensor()(_truck)\n deer = transforms.ToTensor()(_deer)\n frog = transforms.ToTensor()(_frog)\n return torch.stack([truck, deer, frog])", "def load_images():\n print(\"[+] UPDATE - Begin loading images\")\n\n colors = [\"w\", \"b\"]\n piece_types = [\"p\", \"R\", \"N\", \"B\", \"K\", \"Q\"]\n for color in colors:\n for type in piece_types:\n piece = color + type\n IMAGES[piece] = p.transform.scale(p.image.load(\"images/\" + piece + \".png\"), (SQ_SIZE, SQ_SIZE))\n\n print(\"[+] UPDATE - Images loaded\")", "def load(self):\n\n # get files in folder\n files = [f for f in listdir(self.data_path)]\n print(\"loading images from folder: %s\" % self.data_path)\n\n images = []\n image_targets = []\n for f in files:\n filepath = path.join(self.data_path, f)\n images.append(io.imread(filepath, as_grey=True))\n image_targets.append(self.target)\n\n # define new size and resize images\n new_size = (2 ** self.size_exponent, 2 ** self.size_exponent)\n for i in range(0, len(images)):\n # images[i] = transform.resize(images[i], new_size)\n images[i] = misc.imresize(images[i], new_size) / 16\n\n self.images = images\n self.targets = image_targets", "def loadImages(files, targets):\n images = []\n for file in files:\n targets.append(file)\n images.append(snd.imread(file))\n return images, targets", "def load_dataset():\n # Get the start time\n start_time = time.time()\n\n # Load dataset YAML file\n # This contains all of our image labels, as well as locations of the images themself\n print(\"Reading dataset/dataset.yaml... 
\", end=\"\")\n with open(\"dataset/dataset.yaml\", \"r\") as file:\n dataset = yaml.safe_load(file)\n\n # Get paths, labels\n paths = []\n labels = []\n for sample in dataset:\n # Assign a \"1\" label if we're looking at the ground\n # 0 for everything else: trees, buildings, cars, etc\n label_semantic = max(sample[\"labels\"].keys(), key=sample[\"labels\"].get)\n if max(sample[\"labels\"].values()) < 0.80:\n # Samples that are not obviously in any one category: unsafe\n label=0\n elif label_semantic == \"GROUND\":\n # Safe if >80% ground\n label = 1\n else:\n # Unsafe otherwise, this is usually water\n label = 0\n\n paths.append(sample[\"path\"])\n labels.append(label)\n print(\"done!\", flush=True)\n\n print(\"Loading images\", end=\"\")\n # Get images\n images = np.zeros((len(paths), 128, 128, 3), dtype=np.float32)\n progress = 0.0\n for i, path in enumerate(paths):\n images[i] = np.array(PIL.Image.open(path).resize((128, 128))) / 255.0\n if i / len(paths) > progress:\n progress += 1.0 / 20.0\n print(\".\", end=\"\", flush=True)\n print(\" done!\")\n labels = np.array(labels, dtype=np.int)\n\n # Return\n print(f\"Loaded {len(images)} images in {time.time() - start_time} seconds!\")\n return images, labels", "def load_sample_image(image_name):\n images = load_sample_images()\n index = None\n for i, filename in enumerate(images.filenames):\n if filename.endswith(image_name):\n index = i\n break\n if index is None:\n raise AttributeError(\"Cannot find sample image: %s\" % image_name)\n return images.images[index]", "def load_labeled_data():\n\n images = []\n labels = []\n\n for i in range(1, 10):\n path = (\"selflabeled\", str(i), \"*.jpg\")\n filenames = glob.glob(\"/\".join(path))\n images_one_type = [cv2.imread(img) for img in filenames]\n labels_one_type = [i] * len(images_one_type)\n images += images_one_type\n labels += labels_one_type\n\n return images, labels", "def _load_images(paths):\n assert isinstance(paths, list)\n _R_MEAN = 123.68\n _G_MEAN = 116.78\n _B_MEAN = 103.94\n\n # allocate memory\n images = np.zeros([len(paths), FLAGS.target_height, FLAGS.target_width, 3],\n dtype=np.float32)\n\n # load all images\n pbar = ProgressBar(max_value=len(paths))\n for i in range(len(paths)):\n img = sio.imread(paths[i])\n\n # resize images\n img = sresize(img, (FLAGS.target_height, FLAGS.target_width, 3),\n mode='constant', preserve_range=True)\n\n # store images\n images[i] = img.astype(np.float32)\n pbar.update(i)\n\n # mean removal\n images -= [_R_MEAN, _G_MEAN, _B_MEAN]\n return images", "def load_image_data():\n print(\"Loading image data...\")\n label_dict = get_label_vectors()\n categories = [c for c in os.listdir('images/') if c[0] != '.'] # ignore\n labels = [] # instantiate list for image labels\n data = [] # instantiate list for image data\n for i in categories:\n path = 'images/{}/'.format(i) # define path to category folder\n for j in os.listdir(path): # get images from category folder\n labels.append(label_dict[i]) # append label vector\n data.append(cv2.imread(path + j).flatten()) # append flattened image data\n\n labels = np.array(labels) # convert lists to array\n data = np.array(data)\n print(\"Done.\")\n\n return labels, data", "def load_images(path, p=1, feature=None, transform=None):\n\n images = os.listdir(path)\n images = random.sample(images, math.ceil(len(images) * p))\n\n loaded = [\n load_image(\n os.path.join(path, img),\n feature=feature, transform=transform)\n for img in images]\n\n return np.array([x for x in loaded if x is not None])", "def 
load_images(self):\n self.img_paths = sorted(glob(self.img_pattern))\n self.imgs = []\n for idx, this_path in enumerate(self.img_paths):\n try:\n this_img = cv2.imread(this_path)\n if self.downscale > 1:\n this_img = cv2.resize(this_img, (0, 0),\n fx=1/float(self.downscale),\n fy=1/float(self.downscale),\n interpolation=cv2.INTER_LINEAR)\n except Exception as e:\n print(\"error loading img: %s\" % (this_path))\n if this_img is not None:\n self.imgs.append(this_img)\n print(\"loaded img %d size=(%d,%d): %s\" %\n (idx, this_img.shape[0], this_img.shape[1], this_path))\n print(\"loaded %d images\" % (len(self.imgs)))", "def load_images(test_data_dir, image_size = (300, 300)):\n # loop over the input images\n images_data = []\n labels = []\n imagePaths = sorted(list(paths.list_images(test_data_dir)))\n for imagePath in imagePaths:\n # load the image, pre-process it, and store it in the data list\n image = cv2.imread(imagePath)\n image = cv2.resize(image, image_size)\n image = img_to_array(image)\n images_data.append(image)\n\n # extract the class label from the image path and update the\n # labels list\n label = imagePath.split(os.path.sep)[-2]\n labels.append(label)\n return images_data, sorted(labels)", "def load_test_data(image_path):\n raw = []\n image_filename = dict()\n count = 0\n for filename in glob.glob(image_path):\n name = os.path.basename(filename)[:-4]\n try:\n im = Image.open(filename)\n im = im.convert('L')\n im = im.resize((img_rows, img_cols))\n raw.append(np.array(im))\n image_filename[count] = name\n count += 1\n im.close()\n except IOError:\n print('Error loading image ', filename)\n return [raw, image_filename]", "def loadDataset(dataset):\n # List of images.\n images = []\n\n\n\n # Read all filenames from the dataset.\n for filename in dataset:\n # Read the input image.\n image = cv2.imread(filename)\n\n # Add the current image on the list.\n if image is not None: \n images.append(image)\n else:\n print(\"Could not read file: {}\".format(filename))\n sys.exit()\n\n # Return the images list.\n return images", "def load_examples_data(dataset_name):\n dataset_name = dataset_name.strip().lower()\n if dataset_name.lower() not in ['pokemon', 'hanzi', 'animals', 'nsfw', 'simpsons', 'horse2zebra', 'people',\n 'autodrive', 'superresolution', 'anpr', 'beauty','antisproofing','facelandmarks','dogs-vs-cats','chinese']:\n raise ValueError('Not a valid dataset_name.')\n dataset_name = 'examples_' + dataset_name\n dirname = os.path.join(_trident_dir, dataset_name)\n if not os.path.exists(dirname):\n try:\n os.makedirs(dirname)\n except OSError:\n # Except permission denied and potential race conditions\n # in multi-threaded environments.\n pass\n is_internet_ok = is_connected()\n if dataset_name == 'examples_pokemon':\n is_download=download_file_from_google_drive('1U-xc54fX9j9BcidvRa0ow6qjssMlSF2A', dirname, 'pokemon.tar')\n tar_file_path = os.path.join(dirname, 'pokemon.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n extract_path = os.path.join(dirname, 'pokemon')\n dataset = load_folder_images(dataset_name, extract_path, folder_as_label=False)\n print('get pokemon images :{0}'.format(len(dataset)))\n return dataset\n\n\n elif dataset_name == 'examples_hanzi':\n download_file_from_google_drive('13UEzSG0az113gpRPKPyKrIE2HDaA2P4H', dirname, 'hanzi.tar')\n tar_file_path = os.path.join(dirname, 'hanzi.tar')\n extract_path = os.path.join(dirname, 'hanzi')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n dataset = load_folder_images(dataset_name, 
os.path.join(dirname, 'train'), folder_as_label=True,\n object_type=ObjectType.gray)\n\n dataset_test = load_folder_images(dataset_name, os.path.join(dirname, 'test'), folder_as_label=True,\n object_type=ObjectType.gray)\n\n dataset.testdata = dataset_test.traindata\n dataset.class_names['zh-cn'] = dataset.class_names['en-us']\n return dataset\n\n elif dataset_name == 'examples_animals':\n download_file_from_google_drive('19Cjq8OO6qd9k9TMZxlPjDpejDOdiHJoW', dirname, 'animals.tar')\n tar_file_path = os.path.join(dirname, 'animals.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n dataset = load_folder_images(dataset_name, dirname, folder_as_label=True)\n return dataset\n elif dataset_name == 'examples_nsfw':\n tar_file_path = os.path.join(dirname, 'nsfw.tar')\n if os.path.exists(tar_file_path) and get_file_create_time(tar_file_path)<datetime.datetime(2021, 2, 20, 0, 0, 0).timestamp():\n os.remove(tar_file_path)\n if os.path.exists(os.path.join(dirname,'porn_detection_data.pkl')):\n os.remove(os.path.join(dirname,'porn_detection_data.pkl'))\n _delete_h(dirname)\n download_file_from_google_drive('1EXpV2QUrSFJ7zJn8NqtqFl1k6HvXsUzp', dirname, 'nsfw.tar')\n\n extract_path = os.path.join(dirname, 'nsfw')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n folders = ['drawings', 'hentai', 'neutral', 'porn', 'sexy']\n data=unpickle(os.path.join(dirname,'porn_detection_data.pkl'))\n\n trainData = []\n testData = []\n trainLabel = []\n testLabel = []\n for n in range(5):\n folder=folders[n]\n trainData.extend(data[folder]['train'])\n trainLabel.extend([n]*len(data[folder]['train']))\n testData.extend(data[folder]['test'])\n testLabel.extend([n] * len(data[folder]['test']))\n\n trainarray = ImageDataset(trainData,object_type=ObjectType.rgb)\n trainlabel = LabelDataset(trainLabel,object_type=ObjectType.classification_label)\n train_iter = Iterator(data=trainarray, label=trainlabel)\n\n testarray = ImageDataset(testData,object_type=ObjectType.rgb)\n testlabel = LabelDataset(testLabel,object_type=ObjectType.classification_label)\n test_iter = Iterator(data=testarray, label=testlabel)\n print('training images: {0} test images:{1}'.format(len(trainarray), len(testarray)))\n\n dataset = DataProvider(dataset_name, traindata=train_iter, testdata=test_iter)\n dataset.binding_class_names(['drawing', 'hentai', 'neutral', 'porn', 'sexy'], 'en-us')\n dataset.binding_class_names(['绘画', '色情漫画', '中性', '色情', '性感'], 'zh-cn')\n dataset.binding_class_names(['繪畫', '色情漫畫', '中性', '色情', '性感'], 'zh-tw')\n dataset.scenario = 'train'\n return dataset\n elif dataset_name == 'examples_simpsons':\n download_file_from_google_drive('1hGNFbfBv3EZ4nx4Qod6PtSYzO8H4QIxC', dirname, 'simpsons.tar')\n tar_file_path = os.path.join(dirname, 'simpsons.tar')\n extract_path = os.path.join(dirname, 'simpsons')\n extract_archive(tar_file_path, extract_path, archive_format='tar')\n data_provider = load_folder_images(dataset_name, extract_path, folder_as_label=False)\n data_provider.traindata.unpair = RandomNoiseDataset(shape=(100), random_mode='normal')\n print('get simpsons images :{0}'.format(len(data_provider.traindata.data.items)))\n return data_provider\n elif dataset_name == 'examples_horse2zebra':\n download_file_from_google_drive('1pqj-T90Vh4wVNBV09kYZWgVPsZUA2f7U', dirname, 'horse2zebra.tar')\n tar_file_path = os.path.join(dirname, 'horse2zebra.tar')\n extract_path = os.path.join(dirname, 'horse2zebra')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n trainA = 
ImageDataset(list_images(os.path.join(dirname, 'trainA')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n trainB = ImageDataset(list_images(os.path.join(dirname, 'trainB')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n testA = ImageDataset(list_images(os.path.join(dirname, 'testA')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n testB = ImageDataset(list_images(os.path.join(dirname, 'testB')), object_type=ObjectType.rgb,\n get_image_mode=GetImageMode.processed)\n train_iter = Iterator(data=trainA, unpair=trainB)\n test_iter = Iterator(data=testA, unpair=testB)\n dataset = DataProvider(dataset_name, traindata=train_iter, testdata=test_iter)\n print('get horse2zebra images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_people':\n download_file_from_google_drive('1H7mJJfWpmXpRxurMZQqY4N_UXWLbQ2pT', dirname, 'people.tar')\n tar_file_path = os.path.join(dirname, 'people.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, 'imgs', '*.*g'))\n masks = glob.glob(os.path.join(dirname, 'masks', '*.png'))\n imgs=list(sorted(imgs))\n masks = list(sorted(masks))\n # make_dir_if_need(os.path.join(dirname, 'trimap'))\n # for i in range(len(masks)):\n # mask=mask2array(masks[i])\n # trimap=mask2trimap(mask)\n # save_mask(trimap,masks[i].replace('masks','trimap'))\n # print('trimap',len(masks))\n\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb)\n mskdata = MaskDataset(masks=masks, object_type=ObjectType.binary_mask)\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=mskdata))\n print('get people images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_autodrive':\n download_file_from_google_drive('1JqPPeHqhWLqnI6bD8nuHcVx-Y56oIZMK', dirname, 'autodrive.tar')\n tar_file_path = os.path.join(dirname, 'autodrive.tar')\n extract_path = os.path.join(dirname, 'autodrive')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, 'images', '*.*g'))\n masks = glob.glob(os.path.join(dirname, 'masks', '*.png'))\n imgs = list(sorted(imgs))\n masks = list(sorted(masks))\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb,symbol='image')\n mskdata = MaskDataset(masks=masks, object_type=ObjectType.color_mask,symbol='mask')\n\n def parse_code(l):\n if len(l.strip().split(\"\\t\")) == 2:\n a, b = l.replace('\\t\\t', '\\t').strip().split(\"\\t\")\n return tuple(int(i) for i in b.split(' ')), a\n\n label_codes, label_names = zip(\n *[parse_code(l) for l in open(os.path.join(dirname, \"label_colors.txt\")).readlines()])\n for i in range(len(label_codes)):\n mskdata.palette[label_names[i]] = label_codes[i]\n\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=mskdata))\n print('get autodrive images :{0}'.format(len(dataset)))\n return dataset\n elif dataset_name == 'examples_superresolution':\n download_file_from_google_drive('1v1uoymrWI_MLSiGvSGW7tWJYSnzzXpEQ', dirname, 'superresolution.tar')\n tar_file_path = os.path.join(dirname, 'superresolution.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, '*.*g'))\n imgs.extend(glob.glob(os.path.join(dirname, '*.bmp')))\n imgs = list(sorted(imgs))\n\n print('get super resolution images :{0}'.format(len(imgs)))\n\n imgdata = ImageDataset(images=imgs * 2, object_type=ObjectType.rgb, 
symbol='lr')\n labeldata = ImageDataset(images=imgs * 2, object_type=ObjectType.rgb, symbol='hr')\n dataset = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=labeldata))\n return dataset\n elif dataset_name == 'examples_beauty':\n download_file_from_google_drive('1aJhxN9IqsxuayhRTm-gmxk6PiLe5wm9X', dirname, 'beauty.tar')\n tar_file_path = os.path.join(dirname, 'beauty.tar')\n\n extract_archive(tar_file_path, dirname, archive_format='tar')\n # 讀取圖片數據\n images_dict = {}\n with open(os.path.join(dirname, 'images_dict.pkl'), 'rb') as fp:\n images_dict = pickle.load(fp)\n\n f = open(os.path.join(dirname, 'All_Ratings.txt'), encoding='utf-8-sig').readlines()\n imgs = []\n landmarks = []\n ratings = []\n for row in f:\n data = row.strip().split('\\t')\n if 'images\\\\' + data[0] in images_dict:\n img = images_dict['images\\\\' + data[0]][0]\n img = img.transpose([2, 0, 1])[::-1].transpose([1, 2, 0])\n imgs.append(img)\n landmark = images_dict['images\\\\' + data[0]][1].astype(np.float32)\n landmarks.append(landmark)\n rating = (float(data[1])) / 5.00\n ratings.append(rating)\n print('{0} faces loaded...'.format(len(imgs)))\n imgdata = ImageDataset(images=imgs, object_type=ObjectType.rgb, symbol='faces')\n landmarkdata = LandmarkDataset(landmarks=landmarks, object_type=ObjectType.landmarks, symbol='target_landmarks')\n labeldata = LabelDataset(data=ratings,object_type=ObjectType.classification_label, symbol='target_beauty')\n data_provider = DataProvider(dataset_name=dataset_name, traindata=Iterator(data=imgdata, label=Dataset.zip(landmarkdata,labeldata)))\n return data_provider\n\n elif dataset_name == 'examples_facelandmarks':\n download_file_from_google_drive('1GtswQBAHPa_bXaB4tW2uOOQ8Lxfz2L5B', dirname, 'ibug_300W.tar')\n tar_file_path = os.path.join(dirname, 'ibug_300W.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n root_dir=os.path.join(dirname, 'ibug_300W_large_face_landmark_dataset')\n image_paths = {}\n landmarks = {}\n crops = {}\n\n for mode in ['train','test']:\n make_dir_if_need(os.path.join(dirname, 'crops',mode))\n tree = ElementTree.parse(os.path.join(root_dir, 'labels_ibug_300W_{0}.xml'.format(mode)))\n root = tree.getroot()\n image_paths[mode]=[]\n landmarks[mode] = []\n crops[mode] = []\n\n offset=5\n for j in tqdm(range(len(root[2]))):\n try:\n filename=root[2][j]\n landmark = []\n for num in range(68):\n x_coordinate = int(filename[0][num].attrib['x'])\n y_coordinate = int(filename[0][num].attrib['y'])\n landmark.append([x_coordinate, y_coordinate])\n landmark=np.asarray(landmark)\n\n crop = filename[0].attrib\n for k in crop.keys():\n crop[k] = int(crop[k]) if isinstance(crop[k], str) else crop[k]\n for k in crop.keys():\n if k=='top' and int(landmark[:,1].min())<int(crop[k]):\n crop[k] = int( landmark[:,1].min())\n crop[ 'height']+=crop[k]-int(landmark[:,1].min())\n elif k=='left' and int(landmark[:,0].min())<int(crop[k]):\n crop[k] = int( landmark[:,0].min())\n crop['width']+= crop[k] - int(landmark[:, 0].min())\n elif k == 'width' and int(landmark[:, 0].max()-landmark[:, 0].min()) > int(crop[k]):\n crop[k] = int(landmark[:, 0].max()-landmark[:, 0].min())\n elif k == 'height' and int(landmark[:, 1].max()-landmark[:, 1].min()) > int(crop[k]):\n crop[k] = int(landmark[:, 1].max()-landmark[:, 1].min())\n\n crop['left']-=offset\n crop['top'] -= offset\n crop['width'] += 2*offset\n crop['height'] += 2*offset\n\n\n landmark[:,0]-=crop['left']\n landmark[:, 1] -= crop['top']\n\n\n if not os.path.exists(os.path.join(dirname, 
'crops', mode, '{0}.png'.format(j))):\n im=image2array(os.path.join(root_dir, filename.attrib['file']))\n if im.ndim==2:\n im=cv2.cvtColor(im,cv2.COLOR_GRAY2RGB)\n im=im[crop['top']:min(crop['top']+crop['height'],im.shape[0]),crop['left']:min(crop['left']+crop['width'],im.shape[1]),:]\n\n if max(im.shape[:2])/max(min(im.shape[:2]),0)<=5:\n\n array2image(im).save(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n image_paths[mode].append(os.path.join(dirname, 'crops', mode, '{0}.png'.format(j)))\n crops[mode].append(crop)\n landmarks[mode].append(landmark)\n del im\n else:\n #im = image2array(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n image_paths[mode].append(os.path.join(dirname, 'crops',mode,'{0}.png'.format(j)))\n crops[mode].append(crop)\n landmarks[mode].append(landmark)\n\n if j%100==0:\n gc.collect()\n except Exception as e:\n pass\n\n print('ibug 300w train dataset: images: {0} landmarks:{1} \\n'.format(len(image_paths['train']),len(landmarks['train'])))\n print('ibug 300w test dataset: images: {0} landmarks:{1} \\n'.format(len(image_paths['test']), len(landmarks['test'])))\n imdata=ImageDataset(images=image_paths['train'],symbol='faces',object_type=ObjectType.rgb)\n landmarkdata = LandmarkDataset(landmarks=landmarks['train'], symbol='landmarks',object_type=ObjectType.landmarks)\n imtestdata = ImageDataset(images=image_paths['test'], symbol='faces',object_type=ObjectType.rgb)\n landmarktestdata = LandmarkDataset(landmarks=landmarks['test'], symbol='landmarks',object_type=ObjectType.landmarks)\n data_provider=DataProvider(traindata=Iterator(data=imdata,label=landmarkdata),testdata=Iterator(data=imtestdata,label=landmarktestdata))\n return data_provider\n\n elif dataset_name == 'examples_antisproofing':\n download_file_from_google_drive('1e7Zjn2MHNCvA5gXdJUECzY8NjK4KVpa7', dirname, 'antisproofing.tar')\n tar_file_path = os.path.join(dirname, 'antisproofing.tar')\n make_dir_if_need(os.path.join(dirname, 'antisproofing'))\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name,os.path.join(dirname, 'antisproofing'))\n return data_provider\n elif dataset_name == 'examples_anpr':\n download_file_from_google_drive('1uGBd8tXlP0TZAXNgrR6H0jl5MXj7VPbN', dirname, 'anpr.tar')\n tar_file_path = os.path.join(dirname, 'anpr.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n imgs = glob.glob(os.path.join(dirname, '*.*g'))\n imgs = list(sorted(imgs))\n\n # CCPD (Chinese City Parking Dataset, ECCV) and PDRC (license Plate Detection and Recognition Challenge)\n # https://github.com/detectRecog/CCPD\n provinces = [\"皖\", \"沪\", \"津\", \"渝\", \"冀\", \"晋\", \"蒙\", \"辽\", \"吉\", \"黑\", \"苏\", \"浙\", \"京\", \"闽\", \"赣\", \"鲁\", \"豫\", \"鄂\", \"湘\", \"粤\",\n \"桂\", \"琼\", \"川\", \"贵\", \"云\", \"藏\", \"陕\", \"甘\", \"青\", \"宁\", \"新\", \"警\", \"学\", \"O\"]\n alphabets = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',\n 'W', 'X', 'Y', 'Z', 'O']\n ads = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K', 'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W',\n 'X', 'Y', 'Z', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'O']\n\n def lp2char(lp):\n cols = lp.split('_')\n charstring = ''\n for i in range(len(cols)):\n if i == 0:\n charstring += provinces[int(cols[i])]\n elif i == 1:\n charstring += alphabets[int(cols[i])]\n else:\n charstring += ads[int(cols[i])]\n return charstring\n\n width = 720\n height = 1160\n for im_path in imgs:\n lbl = 
im_path.split('/')[-1].rsplit('.', 1)[0].split('-')[-3]\n charstring = lp2char(lbl)\n iname = im_path.rsplit('/', 1)[-1].rsplit('.', 1)[0].split('-')\n [leftUp, rightDown] = [[int(eel) for eel in el.split('&')] for el in iname[2].split('_')]\n box = [leftUp[0], leftUp[1], rightDown[0], rightDown[1]]\n ori_w, ori_h = [float(int(el)) for el in [width, height]]\n new_labels = [(leftUp[0] + rightDown[0]) / (2 * ori_w), (leftUp[1] + rightDown[1]) / (2 * ori_h),\n (rightDown[0] - leftUp[0]) / ori_w, (rightDown[1] - leftUp[1]) / ori_h]\n download_file_from_google_drive('1e7Zjn2MHNCvA5gXdJUECzY8NjK4KVpa7', dirname, 'antisproofing.tar')\n tar_file_path = os.path.join(dirname, 'antisproofing.tar')\n make_dir_if_need(os.path.join(dirname, 'antisproofing'))\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name, os.path.join(dirname, 'antisproofing'))\n return data_provider\n\n\n\n elif dataset_name == 'examples_dogs-vs-cats':\n download_file_from_google_drive('10czW0On7eIXkPP-MuQ-IRxMWdTizWjNC', dirname, 'dogs-vs-cats.tar')\n tar_file_path = os.path.join(dirname, 'dogs-vs-cats.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n data_provider = load_folder_images(dataset_name, dirname)\n return data_provider\n elif dataset_name == 'examples_chinese':\n to_half=ToHalfWidth()\n to_sc=ChineseConvert(convert_to='simplified')\n download_file_from_google_drive('1yzRzXpLuhSUxnixqCgpbdTk16ajnTEWF', dirname, 'chinese.tar')\n tar_file_path = os.path.join(dirname, 'chinese.tar')\n extract_archive(tar_file_path, dirname, archive_format='tar')\n\n as_train = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'as_training.utf8'), encoding='utf-8-sig').read().strip().replace('\\u3000' ,'|'))).splitlines()\n cityu_train =remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'cityu_training.utf8'), encoding='utf-8-sig').read().strip().replace(' ','|'))).splitlines()\n\n as_test = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'as_testing_gold.utf8'), encoding='utf-8-sig').read().strip().replace('\\u3000', '|'))).splitlines()\n cityu_test = remove_nonprintable(to_half(codecs.open(os.path.join(dirname, 'cityu_test_gold.utf8'), encoding='utf-8-sig').read().strip().replace(' ', '|'))).splitlines()\n\n\n data = as_train + cityu_train # 把兩個語料合併\n test_data=as_test + cityu_test # 把兩個語料合併\n\n\n raw_data_train = [row.strip('\\n').strip('\\r') for row in data] # 移除分行字元\n raw_data_test = [row.strip('\\n').strip('\\r') for row in test_data] # 移除分行字元\n\n process_data_train=[]\n process_seg_label_train = []\n process_simplifided_label_train = []\n process_traditional_label_train = []\n\n tmp_data_train = []\n tmp_seg_label_train = []\n tmp_simplifided_label_train = []\n tmp_pronunce_label_train = []\n for k in tqdm(range(len(raw_data_train))):\n row=raw_data_train[k]\n if row.startswith('∥'):\n row=row[1:]\n words=row.replace('||','|').split('|')\n for k in range(len(words)):\n\n word = words[k]\n\n for i in range(len(word)):\n tmp_data_train.append(word[i])\n #tmp_simplifided_label_train.append(to_half(to_sc(word[i])))\n #轉換為BMES\n\n if i==0 and len(word)>1: #B 是一個詞的開始\n tmp_seg_label_train.append('B')\n elif i==len(word)-1 and len(word)>=2 and tmp_seg_label_train[-1] in ['B','M']: #E 是一個詞的結束\n tmp_seg_label_train.append('E')\n elif len(word)==1 and i==0: #S 自己就是一個單詞\n tmp_seg_label_train.append('S')\n elif len(word)>=3 and tmp_seg_label_train[-1] in ['B','M']: #M 是一個詞的中間\n tmp_seg_label_train.append('M')\n\n if 
len(tmp_seg_label_train)>0 and tmp_seg_label_train[-1] in ['E','S']:\n if len(word) > 1 and (is_alphabet(word) or is_punctuation(word)) and k+1<len(words):\n if word in [ '。','﹖']:\n pass\n\n elif random.random() < 0.6 or is_alphabet(word):\n tmp_data_train.append(' ')\n tmp_seg_label_train.append('S')\n\n if (k+1<len(raw_data_train) and not raw_data_train[k+1].startswith( '」')) and words[-1] in [ '。','﹖']:\n #process_traditional_label_train.append(tmp_data_train)\n\n tmp_data_train=to_half(''.join(tmp_data_train))\n tmp_seg_label_train = ''.join(tmp_seg_label_train)\n # if len(tmp_data_train)!=len(tmp_seg_label_train):\n # print('')\n tmp_simplifided_label_train =to_sc(tmp_data_train)\n\n process_data_train.append(tmp_data_train)\n process_seg_label_train.append(tmp_seg_label_train)\n process_simplifided_label_train.append(tmp_simplifided_label_train)\n tmp_data_train = []\n tmp_seg_label_train = []\n tmp_simplifided_label_train = []\n tmp_pronunce_label_train = []\n # else:\n # tmp_data_train.append('\\n')\n # tmp_simplifided_label_train.append('\\n')\n # tmp_seg_label_train.append('\\n')\n corpus=process_data_train\n seg_corpus=process_seg_label_train\n simplifided_corpus =process_simplifided_label_train\n\n process_data_test = []\n process_seg_label_test = []\n process_simplifided_label_test = []\n process_traditional_label_test = []\n print('generate test labels')\n tmp_data_test = []\n tmp_seg_label_test = []\n tmp_simplifided_label_test = []\n tmp_pronunce_label_test = []\n for k in tqdm(range(len(raw_data_test))):\n row=raw_data_test[k]\n if row.startswith('∥'):\n row=row[1:]\n words = row.replace('||', '|').split('|')\n for k in range(len(words)):\n\n word = words[k]\n\n for i in range(len(word)):\n tmp_data_test.append(word[i])\n # tmp_simplifided_label_test.append(to_half(to_sc(word[i])))\n # 轉換為BMES\n\n if i == 0 and len(word) > 1: # B 是一個詞的開始\n tmp_seg_label_test.append('B')\n elif i == len(word) - 1 and len(word) >= 2 and tmp_seg_label_test[-1] in ['B', 'M']: # E 是一個詞的結束\n tmp_seg_label_test.append('E')\n elif len(word) == 1 and i == 0: # S 自己就是一個單詞\n tmp_seg_label_test.append('S')\n elif len(word) >= 3 and tmp_seg_label_test[-1] in ['B', 'M']: # M 是一個詞的中間\n tmp_seg_label_test.append('M')\n\n if len(tmp_seg_label_test) > 0 and tmp_seg_label_test[-1] in ['E', 'S'] and k+1<len(words):\n if len(word) > 1 and (is_alphabet(word) or is_punctuation(word)):\n if word in ['。', '﹖']:\n pass\n elif random.random() < 0.6 or is_alphabet(word):\n tmp_data_test.append(' ')\n tmp_seg_label_test.append('S')\n\n if (k + 1 < len(raw_data_test) and not raw_data_test[k + 1].startswith('」')) and words[-1] in ['。', '﹖']:\n # process_traditional_label_test.append(tmp_data_test)\n\n tmp_data_test = to_half(''.join(tmp_data_test))\n tmp_seg_label_test = ''.join(tmp_seg_label_test)\n # if len(tmp_data_test)!=len(tmp_seg_label_test):\n # print('')\n tmp_simplifided_label_test = to_sc(tmp_data_test)\n\n process_data_test.append(tmp_data_test)\n process_seg_label_test.append(tmp_seg_label_test)\n process_simplifided_label_test.append(tmp_simplifided_label_test)\n tmp_data_test = []\n tmp_seg_label_test = []\n tmp_simplifided_label_test = []\n tmp_pronunce_label_test = []\n # else:\n # tmp_data_test.append('\\n')\n # tmp_simplifided_label_test.append('\\n')\n # tmp_seg_label_test.append('\\n')\n test_corpus = process_data_test\n test_seg_corpus = process_seg_label_test\n test_simplifided_corpus = process_simplifided_label_test\n\n\n 
data=TextSequenceDataset(corpus=corpus,sequence_length=64,sequence_start_at='section_start',object_type=ObjectType.corpus,symbol='input')\n seg_label = TextSequenceDataset(corpus=seg_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='seg_label')\n simplifided_label = TextSequenceDataset(corpus=simplifided_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='simplified_label')\n traditional_label = TextSequenceDataset(corpus= copy.deepcopy(corpus), sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='traditional_label')\n\n data_test=TextSequenceDataset(corpus=test_corpus,sequence_length=64,sequence_start_at='section_start',object_type=ObjectType.corpus,symbol='input')\n seg_test_label = TextSequenceDataset(corpus=test_seg_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='seg_label')\n simplifided_test_label = TextSequenceDataset(corpus=test_simplifided_corpus,sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='simplified_label')\n traditional_test_label = TextSequenceDataset(corpus= copy.deepcopy(test_corpus), sequence_length=64, sequence_start_at='section_start', object_type=ObjectType.sequence_label,symbol='traditional_label')\n\n\n chars = list(sorted(set(list( ''.join(corpus) +bpmf_phonetic+'\\n\\r\\t∥'+ ''.join(simplifided_corpus)+''.join(test_data)))))\n chars.insert(0, '[CLS]')\n chars.insert(1, '[SEP]')\n chars.insert(2, '[UNK]')\n chars.insert(3, '[PAD]')\n chars.insert(4, '[MASK]')\n\n data.vocabs =data_test.vocabs=simplifided_label.vocabs=simplifided_test_label.vocabs = chars\n data.text2index=data_test.text2index =simplifided_label.text2index=simplifided_test_label.text2index = dict((c, i) for i, c in enumerate(chars))\n data.index2text =data_test.index2text =simplifided_label.index2text=simplifided_test_label.index2text= dict((i, c) for i, c in enumerate(chars))\n traditional_label = copy.deepcopy(data)\n traditional_test_label = copy.deepcopy(data_test)\n traditional_label.object_type =traditional_test_label.object_type = ObjectType.sequence_label\n traditional_label.symbol =traditional_test_label.symbol = 'traditional_label'\n\n mask_label = copy.deepcopy(data)\n mask_test_label = copy.deepcopy(data_test)\n #mask_label.object_type =mask_test_label.object_type= ObjectType.corpus\n mask_label.symbol = mask_test_label.symbol = 'mask_label'\n\n\n\n nextword=copy.deepcopy(data)\n nextword_test = copy.deepcopy(data_test)\n nextword.object_type=nextword_test.object_type=ObjectType.sequence_label\n nextword.symbol=nextword_test.symbol='nextword_label'\n nextword.sequence_offset=nextword_test.sequence_offset=1\n\n label=ZipDataset(seg_label,nextword,simplifided_label,traditional_label,mask_label)\n label_test = ZipDataset(seg_test_label, nextword_test, simplifided_test_label, traditional_test_label, mask_test_label)\n provider=TextSequenceDataProvider(\n traindata=Iterator(data=data,label=label),\n testdata=Iterator(data=data_test,label=label_test))\n return provider\n #,sample_filter=lambda x:x[0][-1]==3\n else:\n return None", "def loadImages(self):\n for map_name, img in self.maps.items():\n if img is None or map_name not in __class__.input_tr:\n continue\n getCyclesImage(img)", "def initImages(self):\n pass" ]
[ "0.6988628", "0.6685474", "0.66373193", "0.66370726", "0.6426989", "0.6391403", "0.63717645", "0.62725484", "0.62609285", "0.62502795", "0.6230983", "0.62293315", "0.6186863", "0.6182871", "0.61326575", "0.6113", "0.6112325", "0.6069663", "0.6068178", "0.6067434", "0.6062822", "0.6061903", "0.6044588", "0.6026369", "0.6022848", "0.5999245", "0.59884995", "0.59814626", "0.5973293", "0.59697825" ]
0.7011758
0
Recreate the (compressed) image from the code book & labels
def recreate_image(codebook, labels, w, h): d = codebook.shape[1] image = np.zeros((w, h, d)) label_idx = 0 for i in range(w): for j in range(h): image[i][j] = codebook[labels[label_idx]] label_idx += 1 return image
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def recreate_image(codebook, labels, w, h):\n d = codebook.shape[1]\n image = np.zeros((w, h, d))\n label_idx = 0\n for i in range(w):\n for j in range(h):\n image[i][j] = codebook[labels[label_idx]]\n label_idx += 1\n return image", "def recreate_image(codebook, labels, w, h):\r\n d = codebook.shape[1]\r\n image = np.zeros((w, h, d))\r\n label_idx = 0\r\n for i in range(w):\r\n for j in range(h):\r\n image[i][j] = codebook[labels[label_idx]]\r\n label_idx += 1\r\n return image", "def recreate_image(codebook, labels, w, h):\n d = codebook.shape[1]\n image = np.zeros((w, h, d))\n label_idx = 0\n for i in range(w):\n for j in range(h):\n image[i][j] = codebook[int(labels[label_idx])]\n label_idx += 1\n return image", "def recreate_image(codebook, labels, w, h, d):\r\n image = np.zeros((w, h, d))\r\n label_idx = 0\r\n for i in range(w):\r\n for j in range(h):\r\n image[i][j] = codebook[labels[label_idx]]\r\n label_idx += 1\r\n return image", "def recreate_image(codebook, labels, w, h):\n d = codebook.shape[1]\n image = np.zeros((w, h, d))\n label_idx = 0 # 第几个原始图像的像素点\n for i in range(w):\n for j in range(h):\n # 获取原始数据像素点对应的类别中心点坐标\n # 再根据中心点坐标获取对应的像素值\n image[i][j] = codebook[labels[label_idx]]\n label_idx += 1\n return image", "def prep_data(labels, image_root):\n labels = split_description(labels)\n labels = convert_plastics(labels)\n\n # Encoding shape and color data\n labels['Shape'] = encode_column(labels[['Shape']])\n labels['Color'] = encode_column(labels[['Color']])\n labels['isPlastic'] = encode_column(labels[['isPlastic']])\n labels = add_filenames(labels, image_root)\n labels = labels.dropna().reset_index()\n\n return labels", "def compress_image(filename,k):", "def preprocess(exam, data_folder, save_path, image_format):\n for v in ['L-CC', 'L-MLO', 'R-CC', 'R-MLO']:\n if len(exam[v]) == 0:\n continue\n else:\n for image in exam[v]:\n image_path = data_folder + '/' + image + '.' 
+ image_format\n # Extract subdirectories\n subdirs = \"/\".join(image.split('/')[:-1])\n save_dirs = os.path.join(save_path, subdirs)\n # Extract image id\n image_id = image.split('/')[-1]\n # Create save directories\n os.makedirs(save_dirs, exist_ok=True)\n png_save_path = os.path.join(save_dirs, image_id + '.png')\n with Image(filename=image_path, format=image_format) as img:\n with img.clone() as i:\n i.auto_level()\n with i.convert('png') as png_image:\n png_image.transform(resize='896x1152!')\n png_image.save(filename=png_save_path)", "def main():\n\n #Parse input arguments\n from argparse import ArgumentParser\n\n parser = ArgumentParser()\n\n parser.add_argument(\"-i\", \"--image\", dest=\"image\",\n help=\"specify the name of the image\", metavar=\"IMAGE\")\n\n args = parser.parse_args()\n\n #Load image\n if args.image is None:\n print(\"Please specify the name of image\")\n print(\"use the -h option to see usage information\")\n sys.exit(2)\n else:\n image_name = args.image.split(\".\")[0]\n input_image = cv2.imread(args.image, 0)\n\n\n bin_img = bi.binary_image()\n hist = bin_img.compute_histogram(input_image)\n\n outputDir = 'output/cellct/'\n outputDir_compress = 'output/Compression/'\n\n #Saving histogram to output directory \n hist_fig = plt.plot(hist)\n plt.savefig(outputDir+\"hist.png\")\n\n threshold = bin_img.find_optimal_threshold(hist)\n print(\"Optimal threshold: \", threshold)\n\n binary_img = bin_img.binarize(input_image)\n output_image_name = outputDir + \"binary_image_\" + datetime.now().strftime(\"%m%d-%H%M%S\") + \".jpg\"\n cv2.imwrite(output_image_name, binary_img)\n\n #blobcoloring\n cell_count_obj = cc.cell_counting()\n\n regions = cell_count_obj.blob_coloring(binary_img)\n stats = cell_count_obj.compute_statistics(regions)\n\n cell_stats_img = cell_count_obj.mark_regions_image(binary_img, stats)\n output_image_name = outputDir + \"cell_stats_\" + datetime.now().strftime(\"%m%d-%H%M%S\") + \".jpg\"\n cv2.imwrite(output_image_name, cell_stats_img)\n\t\n #Compression\n rle_obj = rle.rle()\n rle_code = rle_obj.encode_image(binary_img)\n print(\"-------------- Runlength Code -------------------\")\n print(rle_code)\n\n [height, width] = binary_img.shape\n\n decoded_image = rle_obj.decode_image(rle_code, height, width)\n\n output_image_name = outputDir_compress + \"decoded_image_\" + datetime.now().strftime(\"%m%d-%H%M%S\") + \".jpg\"\n cv2.imwrite(output_image_name, decoded_image)", "def compress_image(filename,k):\n img_color=plt.imread(filename)\n orig=img_color.copy()\n R=img_color[:,:,0]\n G=img_color[:,:,1]\n B=img_color[:,:,2]\n \n m,n=(R.shape[0],R.shape[1])\n u1,s1,vh1=la.svd(R,full_matrices=False)\n u2,s2,vh2=la.svd(G,full_matrices=False)\n u3,s3,vh3=la.svd(B,full_matrices=False)\n img_color[:,:,0]=u1[:,:k].dot(np.diag(s1[:k]).dot(vh1[:k,:]))\n img_color[:,:,1]=u2[:,:k].dot(np.diag(s2[:k]).dot(vh2[:k,:]))\n img_color[:,:,2]=u3[:,:k].dot(np.diag(s3[:k]).dot(vh3[:k,:]))\n plt.subplot(211)\n img_color[img_color>1]=1.\n img_color[img_color<0]=0.\n plt.imshow(img_color)\n plt.subplot(212)\n plt.imshow(orig)\n plt.show()\n print G", "def compress_image(filename, s):\r\n image = imread(filename) / 255\r\n size = image.shape\r\n orig_entries = image.size\r\n #colored\r\n if len(size) == 3:\r\n #plot original\r\n orig = plt.subplot(121)\r\n orig.imshow(image)\r\n orig.axis(\"off\")\r\n #red in image\r\n R = image[:,:,0]\r\n #green in image\r\n G = image[:,:,1]\r\n #blue in image\r\n B = image[:,:,2]\r\n #approximate red, green and blue in range\r\n new_R, 
entries_R = svd_approx(R,s)\r\n new_R = np.clip(new_R,0,1)\r\n new_G, entries_G = svd_approx(G,s)\r\n new_G = np.clip(new_G,0,1)\r\n new_B, entries_B = svd_approx(B,s)\r\n new_B = np.clip(new_B,0,1)\r\n #stack all in one array\r\n new_image = np.dstack((new_R,new_G,new_B))\r\n #plot image\r\n new = plt.subplot(122)\r\n new.imshow(new_image)\r\n new.axis(\"off\")\r\n #title image with saved number of entries\r\n plt.suptitle(str(orig_entries - (entries_R+entries_G+entries_B)) + \" Entries\")\r\n\r\n\r\n #grayscale\r\n else:\r\n #plot original\r\n orig = plt.subplot(121)\r\n orig.imshow(image, cmap=\"gray\")\r\n orig.axis(\"off\")\r\n #approximate the image\r\n new_A, entries = svd_approx(image,s)\r\n #plot it\r\n new = plt.subplot(122)\r\n new.imshow(new_A, cmap=\"gray\")\r\n new.axis(\"off\")\r\n #title image with saved number of entries\r\n plt.suptitle(str(orig_entries - entries) + \" Entries\")\r\n\r\n plt.show()", "def make_label_data(self):\n from xml.etree.ElementTree import Element, SubElement, dump, ElementTree, parse\n\n if not self.graphicsView.hasImage():\n self.showImageSelectionMessageBox()\n return\n\n app_doc_data = AppDocData.instance()\n project = app_doc_data.getCurrentProject()\n\n smalls = []\n bigs = []\n\n symbol_list = app_doc_data.getTargetSymbolList(all=True)\n for symbol in symbol_list:\n if symbol.width and symbol.height:\n if symbol.width > 300 or symbol.height > 300:\n bigs.append(symbol.getName())\n else:\n smalls.append(symbol.getName())\n\n symbols = [item for item in self.graphicsView.scene().items() if issubclass(type(item), SymbolSvgItem)]\n names = [smalls, bigs]\n\n img = app_doc_data.activeDrawing.image_origin\n\n small_size = 500\n big_size = 850\n\n save_path = project.getTrainingSymbolFilePath()\n\n index = 0\n for size in [small_size, big_size]:\n offsets = [0, int(size / 2)]\n\n width, height = img.shape[1], img.shape[0]\n width_count, height_count = width // size + 2, height // size + 2\n b_width, b_height = width_count * size, height_count * size\n b_img = np.zeros((b_height, b_width), np.uint8) + 255\n b_img[:height, :width] = img[:, :]\n\n for offset in offsets:\n for row in range(height_count):\n for col in range(width_count):\n x, y = col * size + offset, row * size + offset\n tile_rect = QRectF(x, y, size, size)\n tile_symbols = []\n for symbol in [symbol for symbol in symbols if symbol.name in names[index]]:\n if tile_rect.contains(symbol.sceneBoundingRect()):\n tile_symbols.append(symbol)\n symbols.remove(symbol)\n\n if tile_symbols:\n training_uid = str(uuid.uuid4())\n training_image_path = os.path.join(save_path, training_uid + '.png')\n training_xml_path = os.path.join(save_path, training_uid + '.xml')\n\n # save image\n #_img = b_img[round(tile_rect.top()):round(tile_rect.bottom()),\n # round(tile_rect.left()):round(tile_rect.right())]\n #cv2.imwrite(training_image_path, _img)\n _img = self.graphicsView.image().copy(round(tile_rect.left()), round(tile_rect.top()), round(tile_rect.width()), round(tile_rect.height()))\n _img.save(training_image_path)\n\n # save label\n xml = Element('annotation')\n SubElement(xml, 'folder').text = 'None'\n SubElement(xml, 'filename').text = os.path.basename(save_path)\n\n pathNode = Element('path')\n pathNode.text = save_path.replace('/', '\\\\')\n xml.append(pathNode)\n\n sourceNode = Element('source')\n databaseNode = Element('database')\n databaseNode.text = 'Unknown'\n sourceNode.append(databaseNode)\n xml.append(sourceNode)\n\n sizeNode = Element('size')\n widthNode = Element('width')\n 
widthNode.text = str(int(tile_rect.width()))\n sizeNode.append(widthNode)\n heightNode = Element('height')\n heightNode.text = str(int(tile_rect.height()))\n sizeNode.append(heightNode)\n depthNode = Element('depth')\n depthNode.text = '3'\n sizeNode.append(depthNode)\n xml.append(sizeNode)\n\n segmentedNode = Element('segmented')\n segmentedNode.text = '0'\n xml.append(segmentedNode)\n\n labelContent = []\n counts = {}\n for item in tile_symbols:\n rect = item.sceneBoundingRect()\n label, xMin, yMin, xMax, yMax = item.name, int(rect.x() - 5 - x), int(rect.y() - 5 - y), int(rect.x() + rect.width() + 5 - x), int(rect.y() + rect.height() + 5 - y)\n xMin = xMin if xMin > 0 else 0\n yMin = yMin if yMin > 0 else 0\n xMax = xMax if xMax < size else size\n yMax = yMax if yMax < size else size\n\n if label == 'None' or label == '':\n continue\n if label not in labelContent:\n labelContent.append(label)\n counts[label] = 1\n else:\n counts[label] = counts[label] + 1\n\n objectNode = Element('object')\n nameNode = Element('name')\n nameNode.text = label\n objectNode.append(nameNode)\n poseNode = Element('pose')\n poseNode.text = 'Unspecified'\n objectNode.append(poseNode)\n truncatedNode = Element('truncated')\n truncatedNode.text = '0'\n objectNode.append(truncatedNode)\n difficultNode = Element('difficult')\n difficultNode.text = '0'\n objectNode.append(difficultNode)\n\n bndboxNode = Element('bndbox')\n xminNode = Element('xmin')\n xminNode.text = str(xMin)\n bndboxNode.append(xminNode)\n yminNode = Element('ymin')\n yminNode.text = str(yMin)\n bndboxNode.append(yminNode)\n xmaxNode = Element('xmax')\n xmaxNode.text = str(xMax)\n bndboxNode.append(xmaxNode)\n ymaxNode = Element('ymax')\n ymaxNode.text = str(yMax)\n bndboxNode.append(ymaxNode)\n objectNode.append(bndboxNode)\n\n xml.append(objectNode)\n\n ElementTree(xml).write(training_xml_path)\n\n index += 1\n\n QMessageBox.about(self, self.tr(\"Notice\"), self.tr('Successfully applied. 
'))", "def processImage(fpaths_src, label_map, fnames_src, img_idx):\n global counter\n \n n_imgs = len(fpaths_src)\n print(\"Processing %s -- %s/%s (%s%%)\"%(fnames_src[img_idx],counter,n_imgs,round(100.*counter/n_imgs)))\n \n path = fpaths_src[img_idx]\n src_image_raw = Image.open(path, 'r')\n \n # size normalization of the image\n if not (args.resize == None):\n src_image_raw = src_image_raw.resize(size=(int(args.resize), int(args.resize)), resample=Image.BILINEAR)\n \n # convert to writable numpy array\n src_image = np.asarray(src_image_raw, dtype=np.uint8)\n src_image.setflags(write=True)\n \n # some dummy label\n label = -99.99\n # the labels\n if not (label_map == {}):\n # let the label start at 1, instead of 0\n label = int(label_map[fnames_src[img_idx]])+1\n else:\n # add a dummy label (between 0 and 1)\n label = np.random.rand()\n \n image_features = []\n \n # add the original\n image_features.append(generateFeatures(src_image,label,args.knn))\n \n if args.augment == 1:\n print \"Augmenting dataset...\"\n # data augmentation techniques\n rotation_angles = [i for i in xrange(36,360,36)] # samples are transformed by these rotation angles\n \n flip_x = True # data augmentation by flipping around x axis\n flip_y = True # data augmentation by flipping around y axis\n flip_xy= True # data augmentation by flipping around x AND y axis\n \n for angle in rotation_angles:\n rot_matrix = cv2.getRotationMatrix2D(\n (src_image.shape[1]/2.,src_image.shape[0]/2.),\n angle,\n 1.0)\n rot_sample_crop = np.array([])\n rot_sample_crop = cv2.warpAffine(src_image,\n rot_matrix,\n (src_image.shape[1],src_image.shape[0]),\n rot_sample_crop,\n cv2.INTER_LINEAR,\n cv2.BORDER_REFLECT_101)\n \n # add the sample to the dataset\n image_features.append(generateFeatures(rot_sample_crop,label,args.knn))\n \n # add 3 flipped copies\n if flip_x:\n rot_sample_crop_x = cv2.flip(rot_sample_crop,0)\n image_features.append(generateFeatures(rot_sample_crop_x,label,args.knn))\n if flip_y:\n rot_sample_crop_y = cv2.flip(rot_sample_crop,1)\n image_features.append(generateFeatures(rot_sample_crop_y,label,args.knn))\n if flip_xy:\n rot_sample_crop_xy = cv2.flip(rot_sample_crop,-1)\n image_features.append(generateFeatures(rot_sample_crop_xy,label,args.knn))\n \n counter+=1\n\n # return a nx128 or nxk matrix for the features of all modifications of this image\n feat_matrix = np.asarray(image_features)\n return feat_matrix", "def decode(qf, st, en_filename, jpg_filename, img_shape):\n row, col, ch = img_shape\n en_bin = read_binstr_frome_file(en_filename)\n zbs = decode_AC_DC(en_bin, img_shape, st)\n sample_size = (row, col)\n if st == (4, 1, 1):\n sample_size = ((row // 2), (col // 2))\n elif st == (4, 2, 2):\n sample_size = (row, (col // 2))\n img = np.zeros((row, col, ch), np.uint8)\n for c, zb in enumerate(zbs):\n blocks = de_zigzag(zb)\n q_t = get_quantization_table_by_factor(qf, channel_select[c % len(zbs)])\n img_blocks = get_dequantization_img_blocks(blocks, q_t)\n\n b_r, b_c = (row, col) if c == 0 else sample_size\n\n tmp = np.ones((b_r, b_c), np.int8) * 128\n for i, (row_offset, col_offset) in enumerate(get_block_iterator(b_r, b_c)):\n tmp[row_offset:row_offset + 8 if row_offset + 8 <= b_r else b_r,\n col_offset:col_offset + 8 if col_offset + 8 <= b_c else b_c] += img_blocks[i]\n\n # inverse subsample\n img_blocks = cv2.resize(tmp, (row, col))\n\n img[:, :, c] = np.round(img_blocks)\n\n if ch == 3:\n img = cv2.cvtColor(img, cv2.COLOR_YCrCb2BGR)\n\n cv2.imwrite(jpg_filename, img)\n\n return img", "def 
semantic_image_generator(raw_data, output_path, width, height):\n raw_data = np.frombuffer(raw_data, dtype=np.uint8)\n raw_data = raw_data.reshape(height, width, -1)[:, :, 2:3]\n output = np.zeros((height, width, 3), dtype=np.uint8)\n color_map = create_carla_label_colormap_cityscapes_style()\n for i in range(height):\n for j in range(width):\n output[i, j, :] = color_map[int(raw_data[i, j])]\n output = Image.fromarray(output)\n output.save(output_path)\n return output", "def prep_image_data(arg_dict):\n cat_df = pd.read_csv(arg_dict['category_file'],\n skiprows=1,\n sep='\\s+')\n bbox_df = pd.read_csv(arg_dict['bbox_file'],\n skiprows=1,\n sep='\\s+')\n img_dir = arg_dict['image_dir']\n\n combo_df = pd.merge(cat_df, bbox_df, how='outer', on='image_name')\n combo_df['image_name'] = combo_df['image_name'].apply(\n lambda x: x[len('img'):-len('.jpg')])\n labels = Labels(combo_df, img_dir, n_images_loaded=-1)\n labels.set_data_target('raw_image', chunksize=3000)\n return labels", "def writeImage(image, filename):\n Sky = [128,128,128]\n Building = [128,0,0]\n Pole = [192,192,128]\n Road_marking = [255,69,0]\n Road = [128,64,128]\n Pavement = [60,40,222]\n Tree = [128,128,0]\n SignSymbol = [192,128,128]\n Fence = [64,64,128]\n Car = [64,0,128]\n Pedestrian = [64,64,0]\n Bicyclist = [0,128,192]\n Unlabelled = [0,0,0]\n r = image.copy()\n g = image.copy()\n b = image.copy()\n label_colours = np.array([Sky, Building, Pole, Road_marking, Road, Pavement, Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])\n for l in range(0,12):\n r[image==l] = label_colours[l,0]\n g[image==l] = label_colours[l,1]\n b[image==l] = label_colours[l,2]\n rgb = np.zeros((image.shape[0], image.shape[1], 3))\n rgb[:,:,0] = r/1.0\n rgb[:,:,1] = g/1.0\n rgb[:,:,2] = b/1.0\n im = Image.fromarray(np.uint8(rgb))\n # im.save('/Users/koheiyamamoto/Desktop/SegNet/out/' + filename)\n im.save('./out/' + filename)", "def decode(p):\n #assert p.endswith('.' 
+ EXTENSION)\n p2 = os.path.basename(p).replace('baseline.png', '.png')\n p2p = os.path.join('/mnt/Volume0/test/clic2020-devkit/result/', p2) #add by me\n pp = os.path.join('/mnt/Volume0/test/clic2020-devkit/targets',p2)\n p2 = os.path.join('/mnt/Volume0/test/clic2020-devkit/inputs/', p2) #add by me\n p1 = pframe_dataset_shared.get_previous_frame_path(p2)\n #p1 = os.path.join('/mnt/Volume0/test/clic2020-devkit/test_data/inputs/', p1)\n #assert os.path.isfile(p1), (p2, p1, p, len(glob.glob('*.png')))\n b = Image.open(p).convert('L')\n f2_reconstructed = decoder(np.array(Image.open(p1)), b)\n Image.fromarray(f2_reconstructed).save(p2p)\n return f2_reconstructed, np.array(Image.open(pp))", "def create_png(image, label):\n sv = \"/home/avojtekova/Desktop/final_results/star_det/generated_images/\" \n \n for i in range(len(image)):\n data = fits.getdata(image[i][0], ext = 0)\n norm = ImageNormalize(data,interval = ZScaleInterval(), stretch = LinearStretch())\n \n print(image[i][0])\n plt.imshow(data, cmap='Greys_r', origin='lower', norm=norm)#[1250:1750, 2000:2500] add this when you want just part of image \n plt.title(label[i])\n plt.axis('off')\n plt.tight_layout()\n plt.legend\n if i<2:\n if not os.path.isdir(sv + image[i][0][-33:-25] + \"/\") :\n os.makedirs(sv + image[i][0][-33:-25] + \"/\")\n plt.savefig(sv + image[i][0][-33:-25] + \"/\" + label[i]+ \"_\" + image[i][0][-33:-25] + \"_big.png\", dpi = 1000)#,bbox_inches='tight', pad_inches = 0) \n else:\n if not os.path.isdir(sv + image[i][0][-40:-32] + \"/\") :\n os.makedirs(sv + image[i][0][-40:-32] + \"/\")\n plt.savefig(sv + image[i][0][-40:-32] + \"/\" + label[i]+image[i][0][-40:-32] + \"_big.png\", dpi = 1000)#,bbox_inches='tight', pad_inches = 0)\n plt.close()", "def create_image_caption_pairs(self):", "def setup_annotations(self):\n sbd_path = get_data_path('sbd')\n target_path = pjoin(self.root, 'SegmentationClass/pre_encoded')\n if not os.path.exists(target_path): os.makedirs(target_path)\n path = pjoin(sbd_path, 'dataset/train.txt')\n sbd_train_list = tuple(open(path, 'r'))\n sbd_train_list = [id_.rstrip() for id_ in sbd_train_list]\n train_aug = self.files['train'] + sbd_train_list\n\n # keep unique elements (stable)\n train_aug = [train_aug[i] for i in \\\n sorted(np.unique(train_aug, return_index=True)[1])]\n self.files['train_aug'] = train_aug\n set_diff = set(self.files['val']) - set(train_aug) # remove overlap\n self.files['train_aug_val'] = list(set_diff)\n\n pre_encoded = glob.glob(pjoin(target_path, '*.png'))\n expected = np.unique(self.files['train_aug'] + self.files['val']).size\n\n if len(pre_encoded) != expected:\n print(\"Pre-encoding segmentation masks...\")\n for ii in tqdm(sbd_train_list):\n lbl_path = pjoin(sbd_path, 'dataset/cls', ii + '.mat')\n data = io.loadmat(lbl_path)\n lbl = data['GTcls'][0]['Segmentation'][0].astype(np.int32)\n lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())\n m.imsave(pjoin(target_path, ii + '.png'), lbl)\n\n for ii in tqdm(self.files['trainval']):\n fname = ii + '.png'\n lbl_path = pjoin(self.root, 'SegmentationClass', fname)\n lbl = self.encode_segmap(m.imread(lbl_path))\n lbl = m.toimage(lbl, high=lbl.max(), low=lbl.min())\n m.imsave(pjoin(target_path, fname), lbl)\n\n assert expected == 9733, 'unexpected dataset sizes'", "def make_label_map(path, label_list):\r\n \r\n img = []\r\n for name in path:\r\n now = np.zeros((224,224))\r\n im = cv2.resize(cv2.imread(name), (224,224)).tolist()\r\n for y, i in enumerate(im):\r\n for x, j in enumerate(i):\r\n try:\r\n now[y, x] = 
label_list.index(j)\r\n\r\n except ValueError:\r\n now[y, x] = 0\r\n\r\n img.append(now)\r\n return img", "def label_visualize(img_dir):\n img = scipy.misc.imread(img_dir).astype(np.uint8)\n yo = np.nonzero(img == 1)\n visual = np.zeros((img.shape[0], img.shape[1], 3), dtype=np.uint8)\n\n for i in range(0, 34):\n index = np.nonzero(img == i)\n visual[index + (0,)] = labels[i][0]\n visual[index + (1,)] = labels[i][1]\n visual[index + (2,)] = labels[i][2]\n\n scipy.misc.imsave('./' + img_dir.split('/')[-1], visual)", "def load_pascal(data_dir, split='train'):\n # Wrote this function\n # idx = 0\n # if idx >20:\n # idx+=1\n # break\n \"\"\"\n print(\"Begin Load Images ------------------------------------\")\n images = []\n # images_dict -> key: img_file_idx, value: rgb image ndarray (256*256*3)\n images_dict = {}\n # count\n for infile in glob.glob(\"./VOCdevkit/VOC2007/JPEGImages/*.jpg\"):\n # reshape the images to 256*256*3\n file, ext = os.path.splitext(infile)\n file_idx = file[-6:]\n\n try:\n im = Image.open(infile)\n resized_img = im.resize((256, 256), Image.ANTIALIAS)\n resized_arr = np.array(resized_img)\n images_dict[file_idx] = resized_arr.astype(np.float32)\n except IOError:\n print(\"Error\")\n\n save_obj(images_dict,\"images_dict\")\n \"\"\"\n # label_mat: 2d array, each annotation file is one label_col, multiple label_col mean multiple annotation files\n label_mat = []\n weight_mat = []\n image_mat = []\n\n images_dict = load_obj(\"images_dict\")\n print(\"Return Load Images ------------------------------------\")\n\n # for filename in os.listdir(\"./VOCdevkit/VOC2007/ImageSets/Main/\"):\n for filename in enumerate(CLASS_NAMES):\n\n with open(\"./VOCdevkit/VOC2007/ImageSets/Main/\"+filename[1] +\"_\"+split+\".txt\") as fp:\n print(fp)\n image_mat = []\n label_col = []\n weight_col = []\n line = fp.readline()\n cnt = 1\n while line:\n\n label_idx = line.strip()[:-3]\n try:\n # print(\"Line {}: {}\".format(label_idx, type(label_idx)))\n # Be aware!! 
'000005 ' is different from '000005', there is a space in the first string!!!\n # label_idx = '000005 ' label_idx[:-1]='000005'\n image_mat.append(images_dict[label_idx])\n except IOError:\n print(\"Error Line {}: {}\".format(label_idx, type(label_idx)))\n\n label_flag = int(line.strip()[-2:])\n\n if label_flag is 0 or label_flag is -1:\n label_col.append(np.int32(0))\n else:\n label_col.append(np.int32(1))\n\n if label_flag is 1 or label_flag is -1:\n weight_col.append(np.int32(1))\n else:\n weight_col.append(np.int32(0))\n\n line = fp.readline()\n cnt += 1\n np_label_col = np.asarray(label_col)\n label_mat.append(np_label_col)\n # print(np.shape(label_mat))\n np_weight_col = np.asarray(weight_col)\n weight_mat.append(np_weight_col)\n\n # print('image_mat {}: label_mat {}'.format(np.shape(image_mat), np.shape(label_mat)))\n np_image_mat = np.asarray(image_mat)\n np_label_mat = np.asarray(label_mat)\n np_weight_mat = np.asarray(weight_mat)\n # print('np_image_mat {}: np_label_mat {}'.format(np.shape(np_image_mat), np.shape(np_label_mat)))\n np_trans_label_mat = np_label_mat.transpose()\n np_trans_weight_mat = np_weight_mat.transpose()\n # print(np.shape(np_label_mat))\n # print(np.shape(np_weight_mat))\n print('np_trans_label_mat {}: np_trans_weight_mat {}'.format(np.shape(np_trans_label_mat), np.shape(np_trans_weight_mat)))\n print(\"Return Load Weights and Labels ------------------------------------\")\n return np_image_mat, np_trans_label_mat, np_trans_weight_mat", "def pre_processing_function(label, filename: str, augmentor: Augmentor = None):\n image = imread(filename)\n if augmentor is not None:\n image = np.round(augmentor.run(image)).astype(np.uint8)\n\n return image, label", "def encode_decode(self, img, img_metas):\n pass", "def __init__(self, source, label_config_center, name=None, header=None, \n view_min=None, view_max=None, alpha=255, colormap='gray',\n cross_pos=None):\n if isinstance(source, np.ndarray):\n self._data = np.rot90(source)\n if name == None:\n self._name = 'new_image'\n else:\n self._name = str(name)\n if not isinstance(header, nib.nifti1.Nifti1Header):\n raise ValueError(\"Parameter header must be specified!\")\n elif header.get_data_shape() == source.shape:\n self._header = header\n self._img = None\n else:\n raise ValueError(\"Data dimension does not match.\")\n else:\n self._img = nib.load(source)\n self._header = self._img.get_header()\n basename = os.path.basename(source.strip('/'))\n self._name = re.sub(r'(.*)\\.nii(\\.gz)?', r'\\1', basename)\n self.save_mem_load()\n\n # For convenience, define a shift variable\n self._y_shift = self.get_data_shape()[1] - 1\n\n if view_min == None:\n self._view_min = self._data.min()\n else:\n self._view_min = view_min\n\n if view_max == None:\n self._view_max = self._data.max()\n else:\n self._view_max = view_max\n\n self._alpha = alpha\n self._colormap = colormap\n self._rgba_list = range(self.get_data_shape()[2])\n \n # bool status for the item\n self._visible = True\n if len(self.get_data_shape()) == 3:\n self._4d = False\n else:\n self._4d = True\n self._time_point = 0\n\n # temporal variant for OrthView\n self._cross_pos = cross_pos\n\n # define a dictionary \n self.label_config_center = label_config_center\n self.label_config_center.single_roi_view_update.connect(self.update_single_roi)\n \n # undo redo stacks\n self.undo_stack = DoStack()\n self.redo_stack = DoStack()\n\n self.update_rgba()\n if self._cross_pos:\n self.update_orth_rgba()", "def generate_labels(pics):\r\n return []", "def 
_prepare_image_and_label(self, data):\n image = tf.io.decode_image(data['image/encoded'], channels=3)\n label = tf.io.decode_image(data['image/segmentation/class/encoded'],\n channels=1)\n height = data['image/height']\n width = data['image/width']\n image = tf.reshape(image, (height, width, 3))\n label = tf.reshape(label, (1, height, width))\n label = tf.cast(label, tf.float32)\n # Normalizes image with mean and std pixel values.\n image = input_utils.normalize_image(image)\n return image, label", "def make_layered_psd_from_images():\n\n\t\n\tdoc = open_document(FILEPATHS[0], show=False)\n\tdoc_root = doc.rootNode()\n\t\n\tdocs = []\n\tdocs.append(doc)\n\n\tall_layers = get_layers(doc)\n\tfor i in range(1, len(FILEPATHS)):\n\t\tdocx = open_document(FILEPATHS[i], show=False)\n\t\tdocs.append(docx)\n\t\tdocx_layers = get_layers(docx)\n\t\tfor layer in docx_layers:\n\t\t\tall_layers.append(layer.clone())\n\t\t\t# doc.rootNode().addChildNode(layer, parent_node)\n\tdoc_root.setChildNodes(all_layers)\n\n\tprint('Debug: all nodes: %s' % doc.rootNode().childNodes())\n\t# doc.refreshProjection()\n\n\tsave_filepath = filepath = QtWidgets.QFileDialog.getSaveFileName()[0]\n\tr = doc.saveAs(save_filepath)\n\tprint('Debug: saved: %s' % save_filepath)\n\t\n\tfor doc in docs:\n\t\tprint('Debug: closing %s' % doc)\n\t\tdoc.close()\n\n\tprint('Debug: Script done')" ]
[ "0.79495394", "0.7899799", "0.7884121", "0.7781928", "0.7687692", "0.60020334", "0.5997393", "0.5904447", "0.58416754", "0.5806961", "0.5723574", "0.5705346", "0.56988686", "0.56837463", "0.5680749", "0.5661601", "0.5639231", "0.55816334", "0.55626464", "0.555133", "0.5549232", "0.5540666", "0.5540165", "0.55375177", "0.5529249", "0.55179787", "0.5476792", "0.5471852", "0.54625255", "0.5459958" ]
0.79031765
1
linearly scale the values of an array in the range [0, 1]
def scale01(arr): walk_arr_01 = numpy.interp(arr, (numpy.amin(arr), numpy.amax(arr)), (-1, +1)) # linear scaling return walk_arr_01 #return the scaled array
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def scale0to1(img):\r\n\r\n min = np.min(img)\r\n max = np.max(img)\r\n\r\n if min == max:\r\n img.fill(0.5)\r\n else:\r\n img = (img-min) / (max-min)\r\n\r\n return img.astype(np.float32)", "def scale0to1(img):\r\n\r\n img = img.astype(np.float32)\r\n\r\n min = np.min(img)\r\n max = np.max(img)\r\n\r\n if np.absolute(min-max) < 1.e-6:\r\n img.fill(0.5)\r\n else:\r\n img = (img-min) / (max-min)\r\n\r\n return img.astype(np.float32)", "def scale0to1(img):\r\n\r\n img = img.astype(np.float32)\r\n\r\n if img.shape[-1] != 1:\r\n img = np.sqrt(np.sum(img**2, axis=-1, keepdims=True))\r\n min = np.min(img)\r\n max = np.max(img)\r\n\r\n if np.absolute(min-max) < 1.e-6:\r\n img.fill(0.5)\r\n else:\r\n img = (img - min)/(max - min)\r\n\r\n return img.astype(np.float32)", "def scale_1d(x):\n return (min(x), max(x), len(x))", "def normalize_1d(x, scale=(0, 1, 1024)):\n new_min = scale[0]\n new_max = scale[1]\n new_len = scale[2]\n (min_x, max_x, old_size) = scale_1d(x)\n x_norm = (x - min_x) / (max_x - min_x)\n old_baseline = np.linspace(0, 1, old_size)\n new_baseline = np.linspace(0, 1, new_len)\n if len(old_baseline) <= 1:\n old_baseline = np.array([0, 1])\n x_norm = np.array([1, 0])\n x_interp = interp.interp1d(old_baseline, x_norm)\n x_resized = (x_interp(new_baseline) * (new_max - new_min)) + new_min\n return x_resized", "def scale(input):\n return (input - np.min(input)) / ((np.max(input) - np.min(input)))", "def normalize(array):\n high = array.max()\n low = array.min()\n rng = high - low\n array[:] = 1.0 - ((high - array) / rng)", "def scale(x):\n min_x, max_x = numpy.min(x), numpy.max(x)\n if min_x != max_x:\n x = (x-min_x)/(max_x-min_x)\n else:\n # all the numbers are the same in x\n x = numpy.asarray([1/len(x) for i in range(len(x)) ])\n return x.tolist()", "def lin_scale( val, x1, y1, x2, y2 ):\r\n x_range = (x2 - x1)\r\n new_val = 0\r\n if x_range is 0:\r\n new_val = y1\r\n else:\r\n y_range = ( y2 - y1 )\r\n new_val = ( ( ( val - x1 ) * y_range ) / x_range ) + y1\r\n\r\n return new_val", "def linearscale(input, boundfrom, boundto, extrema=None):\n\n\tminfrom,maxfrom = boundfrom\n\tminto,maxto = boundto\n\n\t### default from bounds are min,max of the input\n\tif minfrom is None:\n\t\tif extrema:\n\t\t\tminfrom = extrema[0]\n\t\telse:\n\t\t\tminfrom = arraystats.min(input)\n\tif maxfrom is None:\n\t\tif extrema:\n\t\t\tmaxfrom = extrema[1]\n\t\telse:\n\t\t\tmaxfrom = arraystats.max(input)\n\n\trangefrom = maxfrom - minfrom\n\tif rangefrom == 0:\n\t\t# if min==max, do simple thresholding\n\t\toutput = numpy.where(input>maxfrom, maxto, minto)\n\telse:\n\t\trangeto = maxto - minto\n\t\tscale = float(rangeto) / rangefrom\n\t\toffset = minfrom * scale\n\t\toutput = input * scale - offset\n\n\treturn output", "def scale(inp: np.ndarray, new_min: float = 0., new_max: float = 1.,\n axis: int = -1) -> np.ndarray:\n xmax = inp.max(axis=axis, keepdims=True)\n xmin = inp.min(axis=axis, keepdims=True)\n a = (inp-xmin) / (xmax - xmin)\n y = a * (new_max - new_min) + new_min\n return y", "def scaleValues(values):\n\n values = values - values.min()\n return values/values.max()", "def hist_normalize_linear(data, new_min, new_max):\n data_min = np.ma.min(data)\n data_max = np.ma.max(data)\n scaled = (data - data_min) * ((new_max - new_min) / (data_max - data_min))\n scaled.mask = data.mask\n return scaled", "def scale(a, tmin=0.0, tmax=1.0):\n return np.interp(a, (a.min(), a.max()), (tmin, tmax))", "def normalize(x):\n a = 0\n b = 1\n scale_min = 0\n scale_max = 255\n return a + ( ( (x - scale_min)*(b - 
a) )/( scale_max - scale_min ) )", "def scale_it(val):\n return scale(val, 0, 1, bpm_range[0], bpm_range[1])", "def _min_max_scale(arr, new_range=(0, 255)):\n # get array's current min and max\n mn = arr.min()\n mx = arr.max()\n\n # check if scaling needs to be done to be in new_range\n if mn < new_range[0] or mx > new_range[1]:\n # perform min-max scaling\n scaled = (new_range[1] - new_range[0]) * (arr - mn) / (mx - mn) + new_range[0]\n else:\n # return array if already in range\n scaled = arr\n\n return scaled", "def normalize_0d(x, old_scale=(0, 1, 1024), new_scale=(0, 1, 1024)):\n old_delta = old_scale[1] - old_scale[0]\n new_delta = new_scale[1] - new_scale[0]\n old_min = old_scale[0]\n new_min = new_scale[0]\n return (x - old_min) * (new_delta / old_delta) + new_min", "def normalize(arr):\n arr = arr.astype('float')\n # Do not touch the alpha channel\n for i in range(1):\n minval = arr[...,i].min()\n maxval = arr[...,i].max()\n if minval != maxval:\n arr[...,i] -= minval\n arr[...,i] *= (255.0/(maxval-minval))\n return arr", "def normalize(arr):\n m = np.min(arr)\n arr = arr - m\n M = np.max(arr)\n arr = arr / M\n return arr", "def scale(x, feature_range=(-1,1)):\r\n x = x * 2 - 1\r\n return x", "def normalise(x):\n return (x - jnp.min(x)) / (jnp.max(x) - jnp.min(x))", "def scale(self, value):\r\n return (float(value)-float(self.minimum))/float(self.maximum-self.minimum)*2.0 - 1.0", "def rescale(x):\n if x.min() != 0:\n raise ValueError('input should have min zero.')\n\n x /= x.max() # max 1\n x *= 2 # max 2\n x -= 1 # range -1, 1\n\n if x.min() != -1 and x.max() != 1:\n raise Exception\n\n return x", "def normalize_data(data, min=0, max=1):\r\n import numpy as np\r\n assert isinstance(data, np.ndarray)\r\n\r\n max_value = np.max(data)\r\n min_value = np.min(data)\r\n\r\n scaled = np.interp(data, [min_value, max_value], [min, max])\r\n # convert to float64\r\n scaled = scaled.astype(np.float64)\r\n\r\n return scaled", "def _scale_array(arr, clip=True):\n if clip:\n scaled = np.clip(arr, 0, 255)\n else:\n scale_range = (max([arr.min(), 0]), min([arr.max(), 255]))\n scaled = _min_max_scale(arr, new_range=scale_range)\n\n return scaled", "def rescale(A, d1, d2):\n \n A[0, 1] = A[0, 1] * (d2 / d1)\n A[1, 0] = A[1, 0] * (d1 / d2)\n \n return A", "def scale_mag_1(x):\n return np.array([np.true_divide(ui, mag(x)) for ui in x])", "def scaleto01(val,check=True):\n if type(val) is not list and type(val) is not np.ndarray:\n val = [val]\n if type(val) is list:\n val = np.array(val)\n assert type(val) is np.ndarray\n assert not check or np.all((val==-1) + (val==1))\n return (val+1)/2.0", "def scale(x, minimum, maximum):\n return (x - minimum) / (maximum - minimum)" ]
[ "0.7274082", "0.71151817", "0.7070224", "0.70607144", "0.695351", "0.6909782", "0.6863304", "0.683024", "0.6815376", "0.68095505", "0.6759205", "0.6644985", "0.6604848", "0.65765154", "0.65633816", "0.65503216", "0.6484777", "0.6470204", "0.644784", "0.64431757", "0.64138615", "0.6412299", "0.63703316", "0.6368305", "0.635279", "0.63487786", "0.63365394", "0.63241345", "0.6312529", "0.62925816" ]
0.7821479
0
extends the init_buffer of OffsetColorProgram class by creating the additional carry flag VBO
def _init_buffers(self, v, n, _): super()._init_buffers(v, n, _) self.vbos.append(gl.glGenBuffers(1)) # init VBO 2 - dynamic color data gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbos[3]) loc = self.get_attribute_location("carried") gl.glEnableVertexAttribArray(loc) gl.glVertexAttribPointer(loc, 1, gl.GL_FLOAT, gl.GL_FALSE, 0, ctypes.c_void_p(0)) gl.glVertexAttribDivisor(loc, 1) gl.glBufferData(gl.GL_ARRAY_BUFFER, 0, np.array([], dtype=np.float32), gl.GL_DYNAMIC_DRAW)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_buffer(self):\n \n self.shape.buf = [pi3d.Buffer(self.shape, self.verts, self.texcoords, self.inds, self.norms)]\n self.shape.set_draw_details(self.shader, [self.spritesheet.img])", "def setupVAO(self, gpuShape):\n glBindVertexArray(gpuShape.vao)\n\n glBindBuffer(GL_ARRAY_BUFFER, gpuShape.vbo)\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, gpuShape.ebo)\n\n # 3d vertices + rgb color specification => 3*4 + 3*4 = 24 bytes\n position = glGetAttribLocation(self.shaderProgram, \"position\")\n glVertexAttribPointer(position, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(0))\n glEnableVertexAttribArray(position)\n \n color = glGetAttribLocation(self.shaderProgram, \"color\")\n glVertexAttribPointer(color, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(12))\n glEnableVertexAttribArray(color)\n\n # Unbinding current vao\n glBindVertexArray(0)", "def _init_plot_buffer(self, configuration):\n if not isinstance(configuration, dict):\n configuration = { 'length': configuration }\n\n # initialize vao/vbo\n vao, vbo = util.VAO(), util.VBO()\n\n # put kernel function into vertex shader\n vertex_shader_kernel = open(SHADER_DIR+'/data.vert.glsl').read()\n if configuration['kernel'] is not None:\n vertex_shader_kernel = vertex_shader_kernel.replace(\n self.KERNEL_PLACEHOLDER,\n configuration['kernel'])\n\n shader = util.Shader(\n vertex=vertex_shader_kernel,\n geometry=open(SHADER_DIR+'/data.geom.glsl').read(),\n fragment=open(SHADER_DIR+'/data.frag.glsl').read(),\n link=True\n )\n norm = configuration.get('norm', float)\n buffer_configuration = {\n 'byte_count': configuration['length'] * 4,\n 'vertex_count': configuration['length']/2,\n 'point_base_color': configuration.get('point_base_color', [0,0,0.5,1]),\n 'point_size': configuration.get('point_size', norm(2.0/configuration['length'])),\n 'vao': vao,\n 'vbo': vbo,\n 'shader': shader\n }\n\n # uniforms\n shader.uniform('mat_plane', self._mat_plot)\n shader.uniform('geometry_color', buffer_configuration['point_base_color'])\n shader.uniform('dot_size', buffer_configuration['point_size'])\n\n # configure vbo\n with vbo.get(0):\n vertex_position = shader.attributeLocation('vertex_position')\n glBufferData(GL_ARRAY_BUFFER, buffer_configuration['byte_count'], None, GL_STATIC_DRAW)\n with vao:\n glVertexAttribPointer(vertex_position, 2, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(0)\n\n return buffer_configuration", "def setupVAO(self, gpuShape):\n\n glBindVertexArray(gpuShape.vao)\n\n glBindBuffer(GL_ARRAY_BUFFER, gpuShape.vbo)\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, gpuShape.ebo)\n\n # 3d vertices + rgb color specification => 3*4 + 3*4 = 24 bytes\n position = glGetAttribLocation(self.shaderProgram, \"position\")\n glVertexAttribPointer(position, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(0))\n glEnableVertexAttribArray(position)\n \n color = glGetAttribLocation(self.shaderProgram, \"color\")\n glVertexAttribPointer(color, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(12))\n glEnableVertexAttribArray(color)\n\n # Unbinding current vao\n glBindVertexArray(0)", "def setupVAO(self, gpuShape):\n\n glBindVertexArray(gpuShape.vao)\n\n glBindBuffer(GL_ARRAY_BUFFER, gpuShape.vbo)\n glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, gpuShape.ebo)\n\n # 3d vertices + rgb color specification => 3*4 + 3*4 = 24 bytes\n position = glGetAttribLocation(self.shaderProgram, \"position\")\n glVertexAttribPointer(position, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(0))\n glEnableVertexAttribArray(position)\n \n color = glGetAttribLocation(self.shaderProgram, \"color\")\n 
glVertexAttribPointer(color, 3, GL_FLOAT, GL_FALSE, 24, ctypes.c_void_p(12))\n glEnableVertexAttribArray(color)\n\n # Unbinding current vao\n glBindVertexArray(0)", "def setup(self, gl_buffers, color_vbo, pos_vbo, partNumber):\n self.gl_objects = gl_buffers\n self.color_vbo, self.pos_vbo = color_vbo, pos_vbo\n self.partNumber = partNumber", "def prepareUniformBuffers(self):\n # Vertex shader uniform buffer block\n uboVSSize = sum([glm.sizeof(ubo) for ubo in self.uboVS.values()])\n bufferInfo = vk.VkBufferCreateInfo(\n sType = vk.VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,\n size = uboVSSize,\n # This buffer will be used as a uniform buffer\n usage = vk.VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT\n )\n # Create a new buffer\n self.uniformBufferVS['buffer'] = vk.vkCreateBuffer(self.device, bufferInfo, None)\n # Get memory requirements including size, alignment and memory type\n memReqs = vk.vkGetBufferMemoryRequirements(self.device, self.uniformBufferVS['buffer'])\n # Get the memory type index that supports host visibile memory access\n # Most implementations offer multiple memory types and selecting the correct one to allocate memory from is crucial\n # We also want the buffer to be host coherent so we don't have to flush (or sync after every update.\n #Note: This may affect performance so you might not want to do this in a real world application that updates buffers on a regular base\n allocInfo = vk.VkMemoryAllocateInfo(\n sType = vk.VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,\n pNext = None,\n allocationSize = memReqs.size,\n memoryTypeIndex = self.vulkanDevice.getMemoryType(memReqs.memoryTypeBits, vk.VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | vk.VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)\n )\n # Allocate memory for the uniform buffer\n self.uniformBufferVS['memory'] = vk.vkAllocateMemory(self.device, allocInfo, None)\n # Bind memory to buffer\n vk.vkBindBufferMemory(self.device, self.uniformBufferVS['buffer'], self.uniformBufferVS['memory'], 0)\n # Store information in the uniform's descriptor that is used by the descriptor set\n self.uniformBufferVS['descriptor'] = vk.VkDescriptorBufferInfo(\n buffer = self.uniformBufferVS['buffer'],\n offset = 0,\n range = uboVSSize\n )\n\n self.updateUniformBuffers()", "def __init__(self, shape, pts, texcoords, faces, normals=None, smooth=True):\r\n super(Buffer, self).__init__()\r\n\r\n # Uniform variables all in one array!\r\n self.unib = (c_float * 12)(0.0, 0.0, 0.0,\r\n 0.5, 0.5, 0.5,\r\n 1.0, 1.0, 0.0,\r\n 0.0, 0.0, 0.0)\r\n \"\"\" pass to shader array of vec3 uniform variables:\r\n\r\n ===== ============================ ==== ==\r\n vec3 description python\r\n ----- ---------------------------- -------\r\n index from to\r\n ===== ============================ ==== ==\r\n 0 ntile, shiny, blend 0 2\r\n 1 material 3 5\r\n 2 umult, vmult, point_size 6 8\r\n 3 u_off, v_off (only 2 used) 9 10\r\n ===== ============================ ==== ==\r\n \"\"\"\r\n #self.shape = shape\r\n self.textures = []\r\n pts = np.array(pts, dtype=float)\r\n texcoords = np.array(texcoords, dtype=float)\r\n faces = np.array(faces)\r\n\r\n if normals == None: #i.e. 
normals will only be generated if explictly None\r\n LOGGER.debug('Calculating normals ...')\r\n\r\n normals = np.zeros(pts.shape, dtype=float) #empty array rights size\r\n\r\n fv = pts[faces] #expand faces with x,y,z values for each vertex\r\n #cross product of two edges of triangles\r\n fn = np.cross(fv[:][:][:,1] - fv[:][:][:,0], fv[:][:][:,2] - fv[:][:][:,0])\r\n fn = Utility.normalize_v3(fn)\r\n normals[faces[:,0]] += fn #add up all normal vectors for a vertex\r\n normals[faces[:,1]] += fn\r\n normals[faces[:,2]] += fn\r\n normals = Utility.normalize_v3(normals)\r\n else:\r\n normals = np.array(normals)\r\n \r\n # keep a copy for speeding up the collision testing of ElevationMap\r\n self.vertices = pts\r\n self.normals = normals\r\n self.tex_coords = texcoords\r\n self.indices = faces\r\n self.material = (0.5, 0.5, 0.5, 1.0)\r\n\r\n # Pack points,normals and texcoords into tuples and convert to ctype floats.\r\n n_verts = len(pts)\r\n if len(texcoords) != n_verts:\r\n if len(normals) != n_verts:\r\n self.N_BYTES = 12 # only use pts\r\n self.array_buffer = c_floats(pts.reshape(-1).tolist())\r\n else:\r\n self.N_BYTES = 24 # use pts and normals\r\n self.array_buffer = c_floats(np.concatenate((pts, normals),\r\n axis=1).reshape(-1).tolist())\r\n else:\r\n self.N_BYTES = 32 # use all three NB doesn't check that normals are there\r\n self.array_buffer = c_floats(np.concatenate((pts, normals, texcoords),\r\n axis=1).reshape(-1).tolist())\r\n\r\n self.ntris = len(faces)\r\n self.element_array_buffer = c_shorts(faces.reshape(-1))\r\n from pi3d.Display import Display\r\n self.disp = Display.INSTANCE # rely on there always being one!\r", "def buildCommandBuffers(self):\n cmdBufInfo = vk.VkCommandBufferBeginInfo(\n sType = vk.VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,\n pNext = None\n )\n # Set clear values for all framebuffer attachments with loadOp set to clear\n # We use two attachments (color and depth) that are cleared at the start of the subpass and as such we need to set clear values for both\n clearValues = []\n clearValue = vk.VkClearValue(\n color = [[ 0.0, 0.0, 0.2, 1.0 ]]\n )\n clearValues.append(clearValue)\n clearValue = vk.VkClearValue(\n depthStencil = [1.0, 0 ]\n )\n clearValues.append(clearValue)\n offset = vk.VkOffset2D(x = 0, y = 0)\n extent = vk.VkExtent2D(width = self.width, height = self.height)\n renderArea = vk.VkRect2D(offset = offset, extent = extent)\n for i in range(len(self.drawCmdBuffers)):\n renderPassBeginInfo = vk.VkRenderPassBeginInfo(\n sType = vk.VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,\n pNext = None,\n renderPass = self.renderPass,\n renderArea = renderArea,\n clearValueCount = 2,\n pClearValues = clearValues,\n # Set target frame buffer\n framebuffer = self.frameBuffers[i]\n )\n # wait this buffer to be released\n #vk.vkWaitForFences(self.device, 1, [self.waitFences[i]], vk.VK_TRUE, vk.UINT64_MAX)\n # rebuild this buffer\n vk.vkBeginCommandBuffer(self.drawCmdBuffers[i], cmdBufInfo)\n # Start the first sub pass specified in our default render pass setup by the base class\n # This will clear the color and depth attachment\n vk.vkCmdBeginRenderPass(self.drawCmdBuffers[i], renderPassBeginInfo, vk.VK_SUBPASS_CONTENTS_INLINE)\n # Update dynamic viewport state\n viewport = vk.VkViewport(\n height = float(self.height),\n width = float(self.width),\n minDepth = 0.0,\n maxDepth = 1.0\n )\n vk.vkCmdSetViewport(self.drawCmdBuffers[i], 0, 1, [viewport])\n # Update dynamic scissor state\n offsetscissor = vk.VkOffset2D(x = 0, y = 0)\n extentscissor = 
vk.VkExtent2D(width = self.width, height = self.height)\n scissor = vk.VkRect2D(offset = offsetscissor, extent = extentscissor)\n vk.vkCmdSetScissor(self.drawCmdBuffers[i], 0, 1, [scissor])\n\n # Bind descriptor sets describing shader binding points\n vk.vkCmdBindDescriptorSets(self.drawCmdBuffers[i], vk.VK_PIPELINE_BIND_POINT_GRAPHICS, self.pipelineLayout, 0, 1, [self.descriptorSet], 0, None)\n # Bind the rendering pipeline\n # The pipeline (state object) contains all states of the rendering pipeline, binding it will set all the states specified at pipeline creation time\n vk.vkCmdBindPipeline(self.drawCmdBuffers[i], vk.VK_PIPELINE_BIND_POINT_GRAPHICS, self.pipeline);\n # Bind triangle vertex buffer (contains position and colors)\n offsets = [ 0 ]\n vk.vkCmdBindVertexBuffers(self.drawCmdBuffers[i], 0, 1, [self.vertices['buffer']], offsets)\n # Bind triangle index buffer\n vk.vkCmdBindIndexBuffer(self.drawCmdBuffers[i], self.indices['buffer'], 0, vk.VK_INDEX_TYPE_UINT32)\n # Draw indexed triangle\n vk.vkCmdDrawIndexed(self.drawCmdBuffers[i], self.indices['count'], 1, 0, 0, 1)\n # uncomment for imgui support\n self.drawUI(self.drawCmdBuffers[i])\n vk.vkCmdEndRenderPass(self.drawCmdBuffers[i])\n # Ending the render pass will add an implicit barrier transitioning the frame buffer color attachment to\n # VK_IMAGE_LAYOUT_PRESENT_SRC_KHR for presenting it to the windowing system\n vk.vkEndCommandBuffer(self.drawCmdBuffers[i])", "def _initialize_buffers(self) -> None:", "def create_buffers(self):", "def screen_vao(cls, gl, program):\n\n vbo = [\n -1.0, -1.0,\n +1.0, -1.0,\n -1.0, +1.0,\n +1.0, +1.0,\n ]\n vbo = np.array(vbo).astype(np.float32)\n vbo = [(gl.buffer(vbo), \"2f\", \"in_pos\")]\n\n ibo = [0, 1, 2, 1, 2, 3]\n ibo = np.array(ibo).astype(np.int32)\n ibo = gl.buffer(ibo)\n\n vao = gl.vertex_array(program, vbo, ibo)\n return vao", "def _build_bufferview(buffer, target, byte_length, byte_offset, byte_stride):\n new_buffer_view = {\n \"buffer\": buffer,\n \"byteLength\": byte_length,\n \"byteOffset\": byte_offset\n }\n\n properties_keys = [\"target\", \"byteStride\"]\n properties_values = [target, byte_stride]\n\n for key, val in zip(properties_keys, properties_values):\n if val is not None:\n new_buffer_view[key] = target\n\n return new_buffer_view", "def __init__(self, attributes, index=None, usage=GL.GL_STATIC_DRAW):\n\n # create vertex array object, bind it\n self.glid = GL.glGenVertexArrays(1)\n GL.glBindVertexArray(self.glid)\n self.buffers = [] # we will store buffers in a list\n nb_primitives, size = 0, 0\n\n # load buffer per vertex attribute (in list with index = shader layout)\n for loc, data in enumerate(attributes):\n if data is not None:\n # bind a new vbo, upload its data to GPU, declare size and type\n self.buffers.append(GL.glGenBuffers(1))\n data = np.array(data, np.float32, copy=False) # ensure format\n nb_primitives, size = data.shape\n GL.glEnableVertexAttribArray(loc)\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.buffers[-1])\n GL.glBufferData(GL.GL_ARRAY_BUFFER, data, usage)\n GL.glVertexAttribPointer(loc, size, GL.GL_FLOAT, False, 0, None)\n\n # optionally create and upload an index buffer for this object\n self.draw_command = GL.glDrawArrays\n self.arguments = (0, nb_primitives)\n if index is not None:\n self.buffers += [GL.glGenBuffers(1)]\n index_buffer = np.array(index, np.int32, copy=False) # good format\n GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.buffers[-1])\n GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, index_buffer, usage)\n self.draw_command = 
GL.glDrawElements\n self.arguments = (index_buffer.size, GL.GL_UNSIGNED_INT, None)", "def initializeGL(self):\n # background color\n gl.glClearColor(0, 0, 0, 0)\n # create a Vertex Buffer Object with the specified data\n self.vbo = glvbo.VBO(self.data)\n # compile the vertex shader\n vs = compile_vertex_shader(VS)\n # compile the fragment shader\n fs = compile_fragment_shader(FS)\n # compile the vertex shader\n self.shaders_program = link_shader_program(vs, fs)\n vs2 = compile_vertex_shader(VS2)\n fs2 = compile_fragment_shader(FS2)\n self.my_shaders_program = link_shader_program(vs2, fs2)", "def __init__(self, attributes, index=None, usage=GL.GL_STATIC_DRAW):\n\n # create vertex array object, bind it\n self.glid = GL.glGenVertexArrays(1)\n GL.glBindVertexArray(self.glid)\n self.buffers = [] # we will store buffers in a list\n nb_primitives, size = 0, 0\n\n # load a buffer per initialized vertex attribute (=dictionary)\n for loc, data in enumerate(attributes):\n if data is None:\n continue\n\n # bind a new vbo, upload its data to GPU, declare its size and type\n self.buffers += [GL.glGenBuffers(1)]\n data = np.array(data, np.float32, copy=False)\n nb_primitives, size = data.shape\n GL.glEnableVertexAttribArray(loc) # activates for current vao only\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, self.buffers[-1])\n GL.glBufferData(GL.GL_ARRAY_BUFFER, data, usage)\n GL.glVertexAttribPointer(loc, size, GL.GL_FLOAT, False, 0, None)\n\n # optionally create and upload an index buffer for this object\n self.draw_command = GL.glDrawArrays\n self.arguments = (0, nb_primitives)\n if index is not None:\n self.buffers += [GL.glGenBuffers(1)]\n index_buffer = np.array(index, np.int32, copy=False)\n GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, self.buffers[-1])\n GL.glBufferData(GL.GL_ELEMENT_ARRAY_BUFFER, index_buffer, usage)\n self.draw_command = GL.glDrawElements\n self.arguments = (index_buffer.size, GL.GL_UNSIGNED_INT, None)\n\n # cleanup and unbind so no accidental subsequent state update\n GL.glBindVertexArray(0)\n GL.glBindBuffer(GL.GL_ARRAY_BUFFER, 0)\n GL.glBindBuffer(GL.GL_ELEMENT_ARRAY_BUFFER, 0)", "def prepare_attrib_mapping(self, primitive):\n buffer_info = []\n for name, accessor in primitive.attributes.items():\n info = VBOInfo(*accessor.info())\n info.attributes.append((name, info.components))\n\n if buffer_info and buffer_info[-1].buffer_view == info.buffer_view:\n if buffer_info[-1].interleaves(info):\n buffer_info[-1].merge(info)\n continue\n\n buffer_info.append(info)\n\n return buffer_info", "def glGetBufferPointerv( baseOperation, target, pname, params=None ):\n if params is None:\n size = glGetBufferParameteriv( target, GL_BUFFER_SIZE )\n data = arrays.ArrayDatatype.zeros( (size,), GL_UNSIGNED_BYTE )\n baseOperation( target, pname, ctypes.byref( data ) )\n return data\n else:\n return baseOperation( target, pname, params )", "def _select(self):\r\n opengles.glBindBuffer(GL_ARRAY_BUFFER, self.vbuf)\r\n opengles.glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.ebuf)", "def _create_bufferview(self, name, buffer, byte_length, byte_offset, byte_stride, target=None):\n new_buffer_view = self._build_bufferview(buffer=self._resolve_mapping(inp=buffer, mapping=self.buffers_map),\n target=target,\n byte_length=byte_length,\n byte_offset=byte_offset,\n byte_stride=byte_stride)\n\n self.bufferViews.append(new_buffer_view)\n\n if name:\n self.bufferViews_map[name] = self._last_index(self.bufferViews)\n\n return self._last_index(self.bufferViews)", "def pc_output_buffers_full(self, *args):\n return 
_add_vector_swig.add_vector_2_cpp_sptr_pc_output_buffers_full(self, *args)", "def _make_buffer(self, width, height):\n fb_prop = p3d.FrameBufferProperties(p3d.FrameBufferProperties.get_default())\n fb_prop.set_multisamples(self._multisamples)\n fb_prop.set_srgb_color(self._srgb_color)\n\n self._buffer = self._engine.make_output(\n self._pipe, name=\"offscreen\", sort=0,\n fb_prop=p3d.FrameBufferProperties.get_default(),\n win_prop=p3d.WindowProperties(size=(width, height)),\n flags=p3d.GraphicsPipe.BFRefuseWindow)\n\n self._region = self._buffer.make_display_region()\n\n self._depth_tex = p3d.Texture()\n self._depth_tex.setFormat(p3d.Texture.FDepthComponent)\n self._buffer.addRenderTexture(\n self._depth_tex, p3d.GraphicsOutput.RTMCopyRam, p3d.GraphicsOutput.RTPDepth)\n\n self._color_tex = p3d.Texture()\n self._color_tex.setFormat(p3d.Texture.FRgba8)\n self._buffer.addRenderTexture(\n self._color_tex, p3d.GraphicsOutput.RTMCopyRam, p3d.GraphicsOutput.RTPColor)", "def initializeGL(self):\n # background color\n gl.glClearColor(0.8, 0.8, 0.8, 0)\n # Make initial data array.\n # compile the vertex shader\n vs = compile_shader(VERTEX, gl.GL_VERTEX_SHADER)\n # compile the geometry shader\n gs = compile_shader(GEOMETRY, gl.GL_GEOMETRY_SHADER)\n # compile the fragment shader\n fs = compile_shader(FRAGMENT, gl.GL_FRAGMENT_SHADER)\n # Link the programs.\n self.render_program = link_shaders(vs, gs, fs)\n # Compile the compute shader\n cs = compile_shader(COMPUTE, gl.GL_COMPUTE_SHADER)\n # Create the compute shader buffers.\n self.makeBuffers()\n #self.vbo = glvbo.VBO(self.attributes)\n self.vbo = gl.glGenBuffers(1)\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbo)\n gl.glBufferData(gl.GL_ARRAY_BUFFER, self.attributes.nbytes,\n self.attributes, gl.GL_DYNAMIC_COPY)\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0)\n\n self.ssbo = gl.glGenBuffers(1)\n gl.glBindBufferBase(gl.GL_SHADER_STORAGE_BUFFER, 1, self.ssbo)\n gl.glBufferData(gl.GL_SHADER_STORAGE_BUFFER, self.velocities.nbytes,\n self.velocities, gl.GL_DYNAMIC_COPY)\n self.compute_program = link_shaders(cs)", "def init_shader(self):\r\n self.attrib_locs = {\r\n \"mc_vertex\": -1,\r\n \"vert_tex_coord\": -1,\r\n }\r\n self.uniform_locs = {\r\n \"model_matrix\": -1,\r\n \"view_matrix\": -1,\r\n \"proj_matrix\": -1,\r\n }\r\n vert_prog = self._compile_shader(ORTH_VERT_SOURCE, gl.GL_VERTEX_SHADER)\r\n frag_prog = self._compile_shader(\r\n ORTH_FRAG_SOURCE, gl.GL_FRAGMENT_SHADER)\r\n self.shader = gl.glCreateProgram()\r\n gl.glAttachShader(self.shader, vert_prog)\r\n gl.glAttachShader(self.shader, frag_prog)\r\n gl.glLinkProgram(self.shader)\r\n assert (gl.glGetProgramiv(self.shader, gl.GL_LINK_STATUS) ==\r\n gl.GL_TRUE), (\r\n \"Error: %s\" % (gl.glGetProgramInfoLog(self.shader)))\r\n\r\n self.attrib_locs = {\r\n name: gl.glGetAttribLocation(self.shader, name)\r\n for name in self.attrib_locs\r\n }\r\n self.uniform_locs = {\r\n name: gl.glGetUniformLocation(self.shader, name)\r\n for name in self.uniform_locs\r\n }\r\n\r\n # Load vertices for final ortho view\r\n self.vao = gl.glGenVertexArrays(1)\r\n gl.glBindVertexArray(self.vao)\r\n self.buffers['mc_vertex'] = gl.glGenBuffers(1)\r\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers['mc_vertex'])\r\n\r\n gl.glBufferData(gl.GL_ARRAY_BUFFER, len(ORTH_VERTICES),\r\n ORTH_VERTICES, gl.GL_STATIC_DRAW)\r\n gl.glVertexAttribPointer(self.attrib_locs['mc_vertex'], 4,\r\n gl.GL_FLOAT, False, 0, ctypes.c_void_p(0))\r\n gl.glEnableVertexAttribArray(self.attrib_locs['mc_vertex'])\r\n\r\n self.buffers['vert_tex_coord'] = 
gl.glGenBuffers(1)\r\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.buffers['vert_tex_coord'])\r\n gl.glBufferData(gl.GL_ARRAY_BUFFER, len(TEXTURE_VERTICES),\r\n TEXTURE_VERTICES, gl.GL_STATIC_DRAW)\r\n gl.glVertexAttribPointer(self.attrib_locs['vert_tex_coord'], 2,\r\n gl.GL_FLOAT, False, 0, ctypes.c_void_p(0))\r\n gl.glEnableVertexAttribArray(self.attrib_locs['vert_tex_coord'])\r\n gl.glActiveTexture(gl.GL_TEXTURE0)", "def __init__(self, camera=None, light=None, name=\"\", z=0.1):\r\n super(Canvas, self).__init__(camera, light, name, x=0.0, y=0.0, z=0.0,\r\n rx=0.0, ry=0.0, rz=0.0, sx=1.0, sy=1.0, sz=1.0,\r\n cx=0.0, cy=0.0, cz=0.0)\r\n self.ttype = GL_TRIANGLES\r\n self.verts = []\r\n self.norms = []\r\n self.texcoords = []\r\n self.inds = []\r\n self.depth = z\r\n\r\n ww = 20.0\r\n hh = 20.0\r\n\r\n self.verts = ((-ww, -hh, z), (0.0, hh, z), (ww, -hh, z))\r\n self.norms = ((0, 0, -1), (0, 0, -1), (0, 0, -1))\r\n self.texcoords = ((0.0, 0.0), (0.5, 1.0), (1.0, 0.0))\r\n\r\n self.inds = ((0, 1, 2), ) #python quirk: comma for tuple with only one val\r\n\r\n self.buf = []\r\n self.buf.append(Buffer(self, self.verts, self.texcoords, self.inds, self.norms))", "def __init__(self, buffer_size, random_seed=None):\n self.buffer_size = buffer_size\n self.count = 0\n self.oldPos = 0\n self.currPos = 0\n self.full = False\n self.buffer = []\n self.featCount = 3\n random.seed(random_seed)\n self.useSubBuffer = False", "def re_init_buffer(self):\n #~ print(self.verts)\n #~ print(self.texcoords)\n #~ print(self.inds)\n self.shape.buf[0].re_init(pts=np.array(self.verts, 'f'),texcoords=np.array(self.texcoords, 'f'))", "def pc_input_buffers_full(self, *args):\n return _add_vector_swig.add_vector_2_cpp_sptr_pc_input_buffers_full(self, *args)", "def pc_output_buffers_full_var(self, *args):\n return _add_vector_swig.add_vector_2_cpp_sptr_pc_output_buffers_full_var(self, *args)", "def set_min_output_buffer(self, *args):\n return _add_vector_swig.add_vector_2_cpp_sptr_set_min_output_buffer(self, *args)" ]
[ "0.60485387", "0.5838095", "0.58365405", "0.58271587", "0.58271587", "0.5774059", "0.5710976", "0.5705954", "0.5652349", "0.5554869", "0.5520962", "0.55178773", "0.54938525", "0.53955853", "0.5374403", "0.53721666", "0.5345584", "0.53194004", "0.5283975", "0.5239198", "0.5213602", "0.52115166", "0.5201943", "0.518131", "0.5143271", "0.5130739", "0.5089273", "0.5062824", "0.50595576", "0.49882728" ]
0.69480604
0
updates the carry flag data (VBO3)
def update_carried(self, data): self.use() gpu_data = np.array(data, dtype=np.float32) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbos[3]) gl.glBufferData(gl.GL_ARRAY_BUFFER, gpu_data.nbytes, gpu_data, gl.GL_DYNAMIC_DRAW)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bcs(self, arg):\n\n self.pc += arg if self.p & const.FLAG_CARRY else 0\n self.pc = c_uint16(self.pc).value", "def bvc(self, arg):\n\n self.pc += arg if not self.p & const.FLAG_OVERFLOW else 0\n self.pc = c_uint16(self.pc).value", "def update_flags(self):\n # view mode, filled vs wirefrom\n if self.view['wireframe']:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE)\n else:\n gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_FILL)\n\n # set fullscreen or windowed\n self.set_fullscreen(fullscreen=self.view['fullscreen'])\n\n # backface culling on or off\n if self.view['cull']:\n gl.glEnable(gl.GL_CULL_FACE)\n else:\n gl.glDisable(gl.GL_CULL_FACE)\n\n # case where we WANT an axis and NO vertexlist\n # is stored internally\n if self.view['axis'] and self._axis is None:\n from .. import creation\n # create an axis marker sized relative to the scene\n axis = creation.axis(origin_size=self.scene.scale / 100)\n # create ordered args for a vertex list\n args = rendering.mesh_to_vertexlist(axis)\n # store the axis as a reference\n self._axis = self.batch.add_indexed(*args)\n\n # case where we DON'T want an axis but a vertexlist\n # IS stored internally\n elif not self.view['axis'] and self._axis is not None:\n # remove the axis from the rendering batch\n self._axis.delete()\n # set the reference to None\n self._axis = None", "def _init_buffers(self, v, n, _):\n super()._init_buffers(v, n, _)\n\n self.vbos.append(gl.glGenBuffers(1))\n\n # init VBO 2 - dynamic color data\n gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vbos[3])\n loc = self.get_attribute_location(\"carried\")\n gl.glEnableVertexAttribArray(loc)\n gl.glVertexAttribPointer(loc, 1, gl.GL_FLOAT, gl.GL_FALSE, 0, ctypes.c_void_p(0))\n gl.glVertexAttribDivisor(loc, 1)\n gl.glBufferData(gl.GL_ARRAY_BUFFER, 0, np.array([], dtype=np.float32), gl.GL_DYNAMIC_DRAW)", "def bcc(self, arg):\n\n self.pc += arg if not self.p & const.FLAG_CARRY else 0\n self.pc = c_uint16(self.pc).value", "def bvs(self, arg):\n\n self.pc += arg if self.p & const.FLAG_OVERFLOW else 0\n self.pc = c_uint16(self.pc).value", "def BVC(self, value):\n if not self.reg.V:\n self.reg.PC += value", "def update(self):\n self.bpos_x += 3", "def TAY(self, *_):\n self.reg.Y = self.reg.A\n self.reg.N = self.reg.Y << 7\n self.reg.Z = self.reg.Y == 0", "def _update_bit_features(self):\n index = 1 if self.is_info_v2 else 0\n for feature, keys in BIT_FEATURES.items():\n status = self.lookup_bit(keys[index])\n self._update_feature(feature, status, False)", "def update_control(self):\n self._control_ctr += 0x01", "def update(self, v, r):\n pass", "def BVS(self, value):\n if self.reg.V:\n self.reg.PC += value", "def _update(self, buf):\n\n self.setup()\n\n # TODO there has to be a better way to force the white colour to be used instead of clear...\n\n for i in range(len(buf)):\n if buf[i] & 0xf == 7:\n buf[i] = (buf[i] & 0xf0) + 1\n # print buf[i]\n if buf[i] & 0xf0 == 0x70:\n buf[i] = (buf[i] & 0xf) + 0x10\n # print buf[i]\n\n self._send_command(AC073TC1_DTM, buf)\n\n self._send_command(AC073TC1_PON)\n self._busy_wait(0.4)\n\n self._send_command(AC073TC1_DRF, [0x00])\n self._busy_wait(45.0) # 41 seconds in testing\n\n self._send_command(AC073TC1_POF, [0x00])\n self._busy_wait(0.4)", "def update_frame(self, frame):\n self.set_bank(frame)\n offset = 0\n for chunk in self._chunk(self._buf[frame], 32):\n self.i2c.write_i2c_block_data(self.address, _COLOR_OFFSET + offset, chunk)\n offset += 32", "def recompile(self):\n\n self.vaos = []\n try:\n self.program, uniforms = self.build_prog(self.gl)\n 
self.u_time, self.u_width, self.u_height = uniforms\n vao = GLUtil.screen_vao(self.gl, self.program)\n self.vaos.append(vao)\n\n self.compute, uniforms, buffers = self.build_cs(self.gl)\n self.u_cstime, self.u_cswidth, self.u_csheight = uniforms\n self.buf_in, self.buf_out = buffers\n\n self.set_gpu_wh(width, height)\n\n self.gx, self.gy = int(width / 8), int(height / 8)\n self.set_gpu_time()\n\n log(\"[Renderer] shader recompiled.\")\n\n except Exception as e:\n log(e)", "def update(self,update_flags):\n pass", "def _select(self):\r\n opengles.glBindBuffer(GL_ARRAY_BUFFER, self.vbuf)\r\n opengles.glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.ebuf)", "def data(self, data, enable):\n app_x, app_y = self.pointer\n self.buffer[\n self.area['start_x'] + app_x][self.area['start_y'] + app_y\n ] = data\n self._inc_pointer()", "def _commit(self):\n ckresult(\n _dll.FMOD_System_Set3DListenerAttributes(\n self._sysptr,\n self._id,\n byref(self._pos),\n byref(self._vel),\n byref(self._fwd),\n byref(self._up),\n )\n )", "def mv_step(self):\n # def mv_all(self):\n self.device_reg_data &= ~(0x1 << 3)\n bus.write_byte_data(self.device_address, self.device_reg_mode1, self.device_reg_data)", "def writeBOV(g):\n global counter\n bovNm = 'file_%03d.bov' % counter\n dataNm = 'file_%03d.data' % counter\n counter += 1\n with open(bovNm, 'w') as f:\n f.write('TIME: %g\\n' % float(counter))\n f.write('DATA_FILE: %s\\n' % dataNm)\n if len(g.shape) == 2:\n f.write('DATA_SIZE: %d %d 1\\n' % g.shape)\n elif len(g.shape) == 3:\n f.write('DATA_SIZE: %d %d %d\\n' % g.shape)\n else:\n raise RuntimeError(f'unexpected shape {g.shape}')\n if g.dtype == np.float64:\n f.write('DATA_FORMAT: DOUBLE\\n')\n elif g.dtype == np.int32:\n f.write('DATA_FORMAT: INT\\n')\n else:\n raise RuntimeError(f'unexpected data type {g.dtype}')\n f.write('VARIABLE: U\\n')\n f.write('DATA_ENDIAN: LITTLE\\n')\n f.write('CENTERING: ZONAL\\n')\n f.write('BRICK_ORIGIN: 0. 0. 
0.\\n')\n f.write('BRICK_SIZE: 1.0 1.0 1.0\\n')\n with open(dataNm, 'w') as f:\n g.T.tofile(f) # BOV format expects Fortran order", "def appGL(deltaT):#-------------------------------- OpenGL UPDATE\n pass # -> Delete this line if you do something here !", "def vbo( self, mode ):\n uploaded = mode.cache.getData( self, 'buffer' )\n if uploaded is None:\n uploaded = vbo.VBO( \n self.buffer, \n usage=self.gl_usage(), \n target=self.gl_target(),\n ) # TODO: stream type\n holder = mode.cache.holder( self, uploaded, 'buffer' )\n holder.depend( self, 'buffer' )\n return uploaded", "def update(): # (12)\n with canvas(device) as draw:\n for led_pos in range(0, len(color_buffer)):\n color = color_buffer[led_pos]\n\n ## If your LED strip's colors are are not in the expected\n ## order, uncomment the following lines and adjust the indexes\n ## in the line color = (rgb[0], rgb[1], rgb[2])\n # rgb = getrgb(color)\n # color = (rgb[0], rgb[1], rgb[2])\n # if len(rgb) == 4:\n # color += (rgb[3],) # Add in Alpha\n\n draw.point((led_pos, 0), fill=color)", "def int_33H_3(self):\r\n horizontal_position, vertical_position = MainWindow.get_cursor_poisition()\r\n button_status = 1\r\n\r\n self.registers['CX'].set_bytes(horizontal_position, is_int=True)\r\n self.registers['DX'].set_bytes(vertical_position, is_int=True)\r\n self.registers['BX'].set_bytes(button_status, is_int=True)", "def write_reg3(self, value: int) -> None:\n self.length_ctr_load = value >> 3\n\n # TODO Restart envelope", "def test_add_to_vx(self, cpu):\n for x in range(0x0, 0xF):\n for v in range(0x0, 0xFF):\n for kk in range(0x0, 0xFF):\n cpu.V_register[x] = v\n cpu.opcode = 0x7000 | (x << 8) | kk\n cpu.add_to_vx()\n assert(cpu.V_register[x] == (v + kk) & 0xFF)", "def TXA(self, *_):\n self.reg.A = self.reg.X\n self.reg.N = self.reg.A << 7\n self.reg.Z = self.reg.A == 0", "def TYA(self, *_):\n self.reg.A = self.reg.Y\n self.reg.N = self.reg.A << 7\n self.reg.Z = self.reg.A == 0" ]
[ "0.57576525", "0.55997807", "0.55879223", "0.5496893", "0.54818845", "0.5372455", "0.530587", "0.5240245", "0.51702", "0.5161561", "0.515714", "0.5140299", "0.51236033", "0.5092073", "0.50008583", "0.49901924", "0.49867123", "0.49156582", "0.4907488", "0.4875377", "0.48464125", "0.48419502", "0.4833743", "0.48262808", "0.47982398", "0.47925672", "0.47923496", "0.4788469", "0.4784059", "0.47831526" ]
0.65785253
0
Sets scale control bitword = 0 x, y frozen scales + 1 x is interactive + 2 y is interactive bit value 0/1 frozen/interactive
def set_scale_control(self, scale_ctl=3): self._scale_ctl = scale_ctl
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _force_rescale(self, setpoint_x, setpoint_y):", "def scale(self,id,x,y,s):\n if id not in self.elements.keys():\n print(\"Id input not registered! Please check your process\")\n return False\n element=self.elements[id]\n state=element.scale(self.h-1-y,x,s,self.w,self.h)\n if state==True:\n self.canvas=np.ones((self.h,self.w,3),dtype=np.uint8)*255\n self.sync=False\n return state", "def _scale_setter(self, value: float) -> None:\n self.uaxis.scale = value\n self.vaxis.scale = value", "def setPlotScaling(x,y):\n dislin.trfscl(x,y)", "def setScale(self, mode='ACC', scale=0):\r\n\t\tif mode.upper() == 'ACC':\r\n\t\t\treg = 0x1C\r\n\t\telif mode.upper() == 'GYR':\r\n\t\t\treg = 0x1B\t\t\r\n\t\telse:\r\n\t\t\treturn False\r\n\t\tcurrentVal = self.read(reg)\r\n\t\tcurrentVal = self.dec2BinList(currentVal)\r\n\t\tscale = self.dec2BinList(value=scale,bits=2)\r\n\t\tcurrentVal[3] = scale[0]\r\n\t\tcurrentVal[4] = scale[1]\r\n\t\tcurrentVal = self.binList2Dec(currentVal)\r\n\t\tself.write(reg, currentVal)", "def scale(self):", "def set_scales(self):\r\n self.canvas.update()\r\n self.dxmin = self.dmargin\r\n self.dymin = self.dmargin\r\n self.dxmax = self.canvas.winfo_width() - self.dmargin - 1\r\n self.dymax = self.canvas.winfo_height() - self.dmargin - 1\r\n\r\n # Flip the Y coordinates to invert the result.\r\n if self.y_is_flipped:\r\n self.dymin, self.dymax = self.dymax, self.dymin\r\n\r\n self.xscale = (self.dxmax - self.dxmin) / (self.wxmax - self.wxmin)\r\n self.yscale = (self.dymax - self.dymin) / (self.wymax - self.wymin)\r\n\r\n # Calculate 1 pixel in world coordinates.\r\n self.xpix = 1 / self.xscale\r\n self.ypix = 1 / self.yscale", "def RatingScale(self):\r\n\t\tself.timer.reset()\r\n\t\tself.respKey = []\r\n\t\tself.choice = []\r\n\t\tself.final_choice = []\r\n\t\tself.keyList = self.respKeys + self.acceptKey\r\n\t\tself.hit_accept = False\r\n\t\tself.y = -0.2 # labels y position\r\n\r\n\t\tself._initScaleTitle()\r\n\t\tself._initExtraText()\r\n\t\tself._initScaleInstruct()\r\n\t\tself._initLine(start = (self.xLeft, -0.05), end = (self.xRight,-0.05))\r\n\t\tself._initScaleMarker(fillColor= self.markerColor)\r\n\t\tself._initTickMarks()\r\n\t\tself._initlabelsList()\r\n\t\tself._initlabelsText()\r\n\t\tself._initChoiceText()\r\n\t\tself._initAcceptText()\r\n\r\n\t\tself.scale_marker.setPos((self.xMid,0), log=None)\r\n\r\n\t\tself.scale_title.draw()\r\n\t\tself.scale_extra_text.draw()\r\n\t\tself.scale_instr_text.draw()\r\n\t\tself.scale_line.draw()\r\n\t\tself.scale_marker.draw()\r\n\t\tself.scale_labels_text.draw()\r\n\r\n\t\tfor label in self.labelsList:\r\n\t\t\tself.scale_labels.setText(label)\r\n\t\t\tself.x = self.labelsPosList[self.labelsList.index(label)]\r\n\t\t\tself.scale_labels.setPos((self.x, self.y),log=None)\r\n\t\t\tself.scale_labels.draw()\r\n\r\n\t\tfor tick in self.tickList:\r\n\t\t\tself.tick_marks.setStart((tick,-0.05), log=None)\r\n\t\t\tself.tick_marks.setEnd((tick,-0.1), log=None)\r\n\t\t\tself.tick_marks.draw()\r\n\r\n\t\tself.win.flip()\r\n\r\n\t\twhile self.hit_accept == False or self.choice == []:\r\n\t\t\tself.respKey = event.getKeys(keyList = self.keyList)\r\n\t\t\tif self.respKey != [] and set(self.respKey).issubset(self.respKeys):\r\n\t\t\t\tself.keyIndex = self.respKeys.index(self.respKey[-1])\r\n\t\t\t\tself.xPos = self.tickList[self.keyIndex]\r\n\t\t\t\tself.choice = self.tickNumber[self.keyIndex]\r\n\t\t\t\tself.choice_text.setText(self.choiceText + 
str(self.choice))\r\n\t\t\t\tself.scale_title.draw()\r\n\t\t\t\tself.scale_extra_text.draw()\r\n\t\t\t\tself.scale_instr_text.draw()\r\n\t\t\t\tself.scale_line.draw()\r\n\t\t\t\tself.scale_marker.setPos((self.xPos,0), log=None)\r\n\t\t\t\tself.scale_marker.draw()\r\n\t\t\t\tself.scale_labels_text.draw()\r\n\t\t\t\tself.choice_text.draw()\r\n\t\t\t\tself.accept_text.draw()\r\n\t\t\t\tfor label in self.labelsList:\r\n\t\t\t\t\tself.scale_labels.setText(label)\r\n\t\t\t\t\tself.x = self.labelsPosList[self.labelsList.index(label)]\r\n\t\t\t\t\tself.scale_labels.setPos((self.x, self.y),log=None)\r\n\t\t\t\t\tself.scale_labels.draw()\r\n\t\t\t\tfor tick in self.tickList:\r\n\t\t\t\t\tself.tick_marks.setStart((tick,-0.05), log=None)\r\n\t\t\t\t\tself.tick_marks.setEnd((tick,-0.1), log=None)\r\n\t\t\t\t\tself.tick_marks.draw()\r\n\t\t\t\tself.win.flip()\r\n\t\t\tif self.respKey != [] and set(self.respKey).issubset(self.acceptKey):\r\n\t\t\t\tif self.choice != []:\r\n\t\t\t\t\tself.hit_accept = True\r\n\t\t\ttime.sleep(0.2)\r\n\t\tself.response_time = round(self.timer.getTime(),2)\r\n\t\tself.win.setUnits(self.savedUnits, log=None)\r\n\t\tevent.clearEvents('keyboard')\r\n\t\treturn(self.choice, self.response_time)", "def scaleBoard(self, scale):\n self.scaling = scale\n self.my_font.config(size=25 * self.scaling)\n self.reset_button.config(width=40 * self.scaling, height=40 * self.scaling, borderwidth=2 * self.scaling)\n self.board.updateBoardUI(self.scaling)", "def edit_scale(scale, direction):\n if direction in (up, shift_up, plus):\n scale = scale*2\n elif direction in (down, shift_down, minus):\n scale = scale/2\n return scale", "def scale(self, sx, sy):\n frameWidth *= sx\n frameHeight *= sy\n repaint()", "def set_scaling(self, scaling):\n self.scaling = scaling\n self.eff_box_size = int(self.box_size*self.scaling+0.5)", "def setScale(self, sx, sy=None, sz=None):\n self.transform.setScale(sx, sy, sz)", "def myscale(g, factor=1.0):\n g.setdata(factor * g.getdata())\n # if !g.frozen eq 0 then show", "def SetLogicalScale(*args, **kwargs):\n return _gdi_.DC_SetLogicalScale(*args, **kwargs)", "def b_scale_object():\n \n bpy.ops.transform.resize(value=(7.5,1,1), constraint_axis=(True,False,False))\n bpy.ops.transform.resize(value=(1,7.5,1), constraint_axis=(False,True,False))\n bpy.ops.transform.resize(value=(1,1,3.5), constraint_axis=(False,False,True))\n bpy.ops.object.transform_apply(scale=True)", "def scale(self, x, y, z) -> None:\n ...", "def setScalingMode(mode='down'):\n mdict = {'down':'DOWN','full':'FULL'}\n dislin.sclmod(mode)", "def Draw_Scale( self ):\r\n self.canvas_scale.delete(ALL)\r\n if(cb.longx != 0):\r\n value = str( round( cb.longx, 3 ) )\r\n self.canvas_scale.create_line( cb.xorigin,5,cb.xorigin + cb.xtotal,5 )\r\n splits = 10.0\r\n increment = cb.xtotal/splits\r\n for i in range(int(splits + 1)):\r\n self.canvas_scale.create_line( int(cb.xorigin+i*increment),1,int(cb.xorigin+i*increment),9 )\r\n if( self.filter_distance > cb.longx ):\r\n self.filter_distance = cb.longx\r\n x = cb.xtotal - self.filter_distance*cb.xtotal/cb.longx + cb.xorigin\r\n top = str(round(self.filter_distance,3))\r\n \r\n while len(top) < 5:\r\n top = top + \"0\"\r\n self.scale_text = self.canvas_scale.create_text( cb.xorigin + cb.xtotal + 10,1,anchor = \"nw\",text = top + \"/\" + value)\r\n self.scale_marker = self.canvas_scale.create_polygon( x,7, x+4,3, x-4,3, fill=self.highlight_color,outline=self.highlight_color )\r\n if( self.filter_line_on ):\r\n if(self.filter_line != 0 ):\r\n 
self.canvas_one.delete( self.filter_line )\r\n self.filter_line = self.canvas_one.create_line( x,0,x,self.ys, fill=self.highlight_color)", "def with_scale_op(self, scale):\n\t\tself.variables['scale'] = scale\n\t\treturn self", "def reset_limits(self):\n self.autoscale = True\n self.pixels.autoscale()", "def scale(self, scale):\n\t\tself._current_score *= scale", "def __init__(self,scale):\n self.scale = scale", "def _onToggleScale(self, event):\r\n if self.get_yscale() == 'log':\r\n self.set_yscale('linear')\r\n else:\r\n self.set_yscale('log')\r\n self.subplot.figure.canvas.draw_idle()", "def reset_scale(self) -> None:\n self._scale.set(self._start_val)", "def set_scale(self, xscale=None, yscale=None, zscale=None, reset_camera=True, render=True):\n if xscale is None:\n xscale = self.scale[0]\n if yscale is None:\n yscale = self.scale[1]\n if zscale is None:\n zscale = self.scale[2]\n self.scale = [xscale, yscale, zscale]\n\n # Reset all actors to match this scale\n for actor in self.actors.values():\n if hasattr(actor, 'SetScale'):\n actor.SetScale(self.scale)\n\n self.parent.render()\n if reset_camera:\n self.update_bounds_axes()\n self.reset_camera(render=render)\n self.Modified()", "def ScaleShape(shape, scale_x, scale_y):\n for i, pt in enumerate(shape.points):\n x, y = pt\n shape.points[i] = [scale_x * x, scale_y * y]", "def __init__(self,options,pos):\n self.options = options\n numobjects = pos.shape[1]\n plt.ion() # turn on interactive plotting mode\n dpi=72.0 # set dpi (I think this is appropriate on mac)\n # fig accepts size in inches\n # so divide desired pixel width, height by dpi to get inches\n w,h=(self.options.width/dpi,self.options.height/dpi)\n fig = plt.figure(1,figsize=(w,h),dpi=dpi)\n fig.clear()\n\n #w = self.options.width/fig.get_dpi() # desired width in inches\n #h = self.options.height/fig.get_dpi() # desired height in inches\n #fig.set_size_inches(w,h,forward=True) # last arg resizes the canvas to match\n\n self.ax = plt.axes()\n self.ax.set_xlim(self.options.xmin,self.options.xmax)\n self.ax.set_ylim(self.options.ymin,self.options.ymax)\n #pyplot.axis('scaled')\n\n # I don't know why axis('scaled') doesn't work here\n # But I think the next two commands are equivalent\n self.ax.set_aspect('equal', adjustable='box', anchor='C')\n self.ax.set_autoscale_on(False)\n\n #self.redraw()\n\n\n #facecolors = [cm.jet(x) for x in np.random.rand(len(vicon_objects))]\n facecolors = [cm.jet(x) for x in np.linspace(0,1,numobjects)]\n if self.options.visualize_switch_xy:\n if self.options.axis==1:\n self.ax.axvline(linewidth=4, c='k')\n else:\n self.ax.axhline(linewidth=4, c='k')\n self.col = plt.scatter(pos[:,1],pos[:,0],c=facecolors,s=3000)\n else:\n if self.options.axis==1:\n self.ax.axhline(linewidth=4, c='k')\n else:\n self.ax.axvline(linewidth=4, c='k')\n self.col = plt.scatter(pos[:,0],pos[:,1],c=facecolors,s=3000)\n\n # scores\n self.tpos = self.ax.text(0.75*self.options.xmax,0.75*self.options.ymin,str(50),\n size=72,color='k',ha='center',va='center')\n self.tneg = self.ax.text(0.75*self.options.xmin,0.75*self.options.ymin,str(50),\n size=72,color='k',ha='center',va='center')\n\n self.canvas = agg.FigureCanvasAgg(fig)\n self.canvas.draw()\n self.renderer = self.canvas.get_renderer()\n raw_data = self.renderer.tostring_rgb()\n\n pygame.init()\n \n self.window = pygame.display.set_mode((options.width,options.height), DOUBLEBUF)\n self.screen = pygame.display.get_surface()\n\n self.set_caption(\"Possession: Waiting for Vicon\")\n \n size = self.canvas.get_width_height()\n 
\n surf = pygame.image.fromstring(raw_data, size, \"RGB\")\n self.screen.blit(surf, (0,0))\n pygame.display.flip()", "def scale(self, scale):\n\n self._scale = scale", "def scale(self, scale):\n\n self._scale = scale" ]
[ "0.67285424", "0.6584716", "0.6431193", "0.6370215", "0.6340126", "0.6294655", "0.62531334", "0.6227982", "0.6212142", "0.6209266", "0.6207113", "0.6194933", "0.6148316", "0.61001164", "0.6055724", "0.60446364", "0.60115176", "0.6009035", "0.59821504", "0.59555095", "0.591816", "0.59128934", "0.58997947", "0.5884049", "0.58770686", "0.58687717", "0.58604145", "0.58489335", "0.5843493", "0.5843493" ]
0.67066544
1
Get versions of EFI, Boot ROM, OS & Mac Device as well as the SysUUID
def gather_system_versions(self): # Get Mac model ID self.hw_version = str( IORegistryEntryCreateCFProperty( IOServiceGetMatchingService( 0, IOServiceMatching("IOPlatformExpertDevice")), "model", None, 0)).replace( "\x00", "") if "imacpro" in self.hw_version.lower(): # iMac Pro stores it's EFI data different due it's new architecture # so grab the EFI & SMC ROM versions appropriately raw_efi_list = [] raw_rom_info = str( IORegistryEntryCreateCFProperty( IORegistryEntryFromPath( 0, "IODeviceTree:/rom"), "apple-rom-info", None, 0)) for data in raw_rom_info.split("\n"): if data.strip().startswith("BIOS ID"): raw_efi_list = data.split(":")[1].strip().split(".") break else: self.message( "[-] Could not find raw EFI data to determine EFI versions. Exiting....") return False self.efi_version = "%s.%s.%s" % ( raw_efi_list[0], raw_efi_list[2], raw_efi_list[3]) # Can't currently find the SMC version like this on imac pros .... # self.smc_version = str(IORegistryEntryCreateCFProperty(IOServiceGetMatchingService(0, IOServiceMatching("AppleSMC")), "smc-version", None, 0)) self.smc_version = "" else: # EFI & SMC ROM versions self.smc_version = str( IORegistryEntryCreateCFProperty( IOServiceGetMatchingService( 0, IOServiceMatching("AppleSMC")), "smc-version", None, 0)) raw_efi = str( IORegistryEntryCreateCFProperty( IORegistryEntryFromPath( 0, "IODeviceTree:/rom"), "version", None, 0)).replace( "\x00", "").split(".") self.efi_version = "%s.%s.%s" % ( raw_efi[0], raw_efi[2], raw_efi[3]) # Set the salt to be the MAC address of the system, using the MAC as a salt in this manner # helps ensure that the hashed sysuuid is pseudonymous. We don't want to know the sysuuid's # value, but we do want it to be unique however. The Salt value is # never submitted to the API salt = hex(getnode()) sys_uuid = str( IORegistryEntryCreateCFProperty( IOServiceGetMatchingService( 0, IOServiceMatching("IOPlatformExpertDevice")), "IOPlatformUUID", None, 0)).replace( "\x00", "") self.h_sys_uuid = hashlib.sha256(salt + sys_uuid).hexdigest() # Get the Board-ID, this is how EFI files are matched to running # hardware - Nastee self.board_id = str( IORegistryEntryCreateCFProperty( IOServiceGetMatchingService( 0, IOServiceMatching("IOPlatformExpertDevice")), "board-id", None, 0)).replace( "\x00", "") # Get OS version self.os_version = commands.getoutput("sw_vers -productVersion") # Get build number self.build_num = commands.getoutput("sw_vers -buildVersion") # Carve out the major version as we use this a bunch # self.os_maj_ver = ".".join(self.os_version.split(".")[:2]) # Add gathered info to the dictionary to query the API with self.endpoints_to_check["127.0.0.1"] = { "hashed_uuid": self.h_sys_uuid, "hw_ver": self.hw_version, "rom_ver": self.efi_version, "smc_ver": self.smc_version, "board_id": self.board_id, "os_ver": self.os_version, "build_num": self.build_num} return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_firmware_version():\r\n return utils.run('crossystem fwid').stdout.strip()", "def _get_release_infos():\n \n # support RHEL or CentOS, we don't care about the rest...\n with settings(hide('warnings', 'running', 'stdout', 'stderr'), warn_only=True):\n infos = run('cat /etc/redhat-release')\n \n m = _lsb_release_version.match(infos)\n if m is not None:\n return tuple(m.groups())\n else:\n abort('OS not supported.')", "def software_versions():\n\n quiet = 1\n versions = collections.OrderedDict()\n for package in ['python', 'python3', 'robot', 'firefox', 'google-chrome']:\n # Note: \"robot --version\" returns 0x00000000000000fb.\n # Note: If package does not exist, 0x7f is returned.\n rc, version = gc.shell_cmd(package + \" --version\",\n valid_rcs=[0, 0x7f, 0xfb])\n versions[package] = \"Not installed\" if rc == 0x7f else version.rstrip('\\n')\n\n versions.update(import_versions)\n\n for package in ['robotframework-angularjs', 'robotframework-scplibrary',\n 'robotframework-extendedselenium2library']:\n rc, version = gc.shell_cmd(\"pip3 show \" + package\n + \" | grep Version | sed -re 's/.*: //g'\")\n versions[package] = \"Not installed\" if not version else version.rstrip('\\n')\n\n rc, version = gc.shell_cmd(\"lsb_release -d -s\")\n versions[\"host OS\"] = \"Failed\" if not version else version.rstrip('\\n')\n return versions", "def getbootinfo(self):\n self.mount()\n kernel = None\n inits = []\n for line in self.xlist(\"get-bootinfo\", IBASE)[1]:\n if line.startswith('+++'):\n kernel = line.split()[1]\n else:\n inits.append(line)\n self.unmount()\n if not inits:\n run_error(_(\"No initramfs found\"))\n return None\n if not kernel:\n run_error(_(\"GRUB problem:\\n\") + inits[0])\n return None\n return (kernel, inits)", "def micro_Version(self):\n return tuple(map(ord, self._serial_io(b'\\x56', 2)[0:2]))", "def test_get_hyperflex_server_firmware_version_list(self):\n pass", "def get_msi_versions(vm_address):\n content = \"\"\"\nimport _winreg, sys\nversions = {}\nfor base in [\n \"SOFTWARE\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Uninstall\",\n \"SOFTWARE\\\\Wow6432Node\\\\Microsoft\\\\Windows\\\\CurrentVersion\\\\Uninstall\"]:\n try:\n uninstall = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, base)\n except WindowsError:\n continue \n try:\n i = 0\n while 1:\n sub = _winreg.EnumKey(uninstall, i)\n subk = _winreg.OpenKey(_winreg.HKEY_LOCAL_MACHINE, base+'\\\\'+sub)\n j = 0\n progname = version = None\n try:\n while 1:\n name, value, _ = _winreg.EnumValue(subk, j)\n if name == 'DisplayName':\n progname = value\n if name == 'DisplayVersion':\n version = value\n #print >>sys.stderr, i,j, sub, 'entry', name, value\n j += 1\n except WindowsError:\n pass\n if progname:\n versions[progname] = version\n i += 1\n except WindowsError:\n pass\nprint versions\n\"\"\".replace('\\\\', '\\\\\\\\')\n call_exec_daemon('createFile', ['C:\\\\list_installed_programs.py', content],\n host=vm_address)\n try:\n versions = eval(run_via_exec_daemon(['C:\\\\list_installed_programs.py'],\n host=vm_address))\n print 'INSTALL_TOOLS: versions installed=', versions\n finally:\n call_exec_daemon('removeFile', ['C:\\\\list_installed_programs.py'], host=vm_address)\n pass\n return versions", "def check_fw_versions(self, sys_info, api_results):\n if not api_results.get(\"latest_efi_version\"):\n # Call the API to see what the latest version of EFI you are\n # expected to be running given OS ver and mac model\n api_results[\n self.current_endpoint][\"latest_efi_version\"] = self.__make_api_get(\n 
'/apple/latest_efi_firmware/%s/%s' %\n (sys_info.get(\"hw_ver\"), sys_info.get(\"build_num\")))\n\n self.message(\"\\n\\tEFI firmware version check:\")\n\n # Validate response from API\n if self._validate_response(api_results[\"latest_efi_version\"]):\n # Valid response from API - now interpret it\n\n # This is kind messy but it's so as we can detect newer and older firmware and message accordingly rather than just looking for 'different' versions\n # the way that EFI versions are denoted by Apple makes this more of\n # a pain thatit really needs to be quite honestly\n api_efi_str = api_results[\"latest_efi_version\"][\"msg\"].split(\".\")\n my_efi_str = sys_info.get(\"rom_ver\").split(\".\")\n\n api_efi_ver = int(api_efi_str[1], 16)\n api_efi_build = int(api_efi_str[2].replace(\"B\", \"\"), 16)\n\n if all([x.isdigit() for x in my_efi_str]):\n # Newer EFI versions do not include a build number\n # or the Mac model code. The output will be something\n # like 256.0.0, whereas with the old format it would\n # be MBP133.0256.B00.\n my_efi_ver = int(my_efi_str[0], 16)\n my_efi_build = 0\n else:\n my_efi_ver = int(my_efi_str[1], 16)\n my_efi_build = int(my_efi_str[2].replace(\"B\", \"\"), 16)\n\n if api_efi_str == my_efi_str:\n self.message(\n \"\\t\\t[+] SUCCESS - The EFI Firmware you are running (%s) is the expected version for the OS build you have installed (%s) on your %s\" %\n (sys_info.get(\"rom_ver\"), sys_info.get(\"build_num\"), sys_info.get(\"hw_ver\")))\n elif my_efi_ver == api_efi_ver and my_efi_build == api_efi_build:\n self.message(\n \"\\t\\t[+] SUCCESS - The EFI Firmware you are running (%s) is the expected version for the OS build you have installed (%s) on your %s\" %\n (sys_info.get(\"rom_ver\"), sys_info.get(\"build_num\"), sys_info.get(\"hw_ver\")))\n\n elif (my_efi_ver > api_efi_ver) or (my_efi_ver > api_efi_ver and my_efi_build > api_efi_build) or (my_efi_ver == api_efi_ver and my_efi_build > api_efi_build):\n # Looks like you're running a beta or a dev build - pretty much\n # all bets are off here as the dataset doens't cover dev builds\n # but a nicer message makes sense\n self.message(\n \"\\t\\t[!] ATTENTION - It looks like your EFI version (%s) is NEWER than the latest production release that is in the dataset (%s). This is most likely because you are now, or have in the past, installed a developer preview OS and as part of that you also had newer EFI firmware installed. The EFIgy API currently only has reliable data for production OS releases.\" %\n (sys_info.get(\"rom_ver\"), api_results[\"latest_efi_version\"][\"msg\"]))\n\n else:\n self.message(\n \"\\t\\t[-] ATTENTION - You are running an unexpected firmware version given the model of your system (%s) and OS build you have installed (%s). 
Your firmware is %s, the firmware we expected to see is %s.\\n\" %\n (sys_info.get(\"hw_ver\"), sys_info.get(\"build_num\"), sys_info.get(\"rom_ver\"), api_results[\"latest_efi_version\"][\"msg\"]))", "def get_zhinst_firmware_versions(zi_instruments=None):\n if zi_instruments is None:\n zi_instruments = get_all_connected_zi_instruments()\n\n versions, exceptions = {}, {}\n for node in ['system/fwrevision', 'system/fpgarevision']:\n versions[node] = {}\n for dev in zi_instruments:\n try:\n versions[node][f'{dev.name} - {dev.devname}'] = dev.geti(node)\n except Exception:\n try:\n # for QCodes-based devices\n versions[node][f'{dev.name} - {dev.devname}'] = \\\n dev.daq.getInt(f'{dev.devname}/system/fwrevision')\n except Exception as e:\n exceptions[f'{node} for {dev.devname}'] = e\n return versions, exceptions", "def describe_operating_systems():\n pass", "def __getSuSEVersion(self):\n linuxVendor = \"SuSE\"\n linuxRelease, resultErr = self.ksp_ssh.ssh_execute_command(\n \"grep 'VERSION' /etc/SuSE-release | cut -d= -f2 | tr -d ' \\n'\")\n return linuxVendor.strip(), linuxRelease.strip()", "def get_os_details(self, result, host):\n if \"osmatch\" in result['scan'][host] and len(result['scan'][host][\"osmatch\"]) > 0:\n name = result['scan'][host][\"osmatch\"][0][\"name\"]\n os_family = result['scan'][host][\"osmatch\"][0][\"osclass\"][0][\"osfamily\"]\n os_gen = result['scan'][host][\"osmatch\"][0][\"osclass\"][0][\"osgen\"]\n return [name, os_family, os_gen]\n elif \"osclass\" in result['scan'][host]:\n name = result['scan'][host]['osclass']['vendor']\n os_family = result['scan'][host]['osclass']['osfamily']\n os_gen = result['scan'][host]['osclass']['osgen']\n return [name, os_family, os_gen]\n else:\n return [\"\", \"\", \"\"]", "def firmware_version(self):\n return self._get_system_status()[\"firmware\"]", "def get_version_and_model_spread(devices):\n if isinstance(devices[0], jss.Computer):\n os_type_search = \"hardware/os_name\"\n os_type = \"Mac OS X\"\n os_version_search = \"hardware/os_version\"\n model_search = \"hardware/model\"\n model_identifier_search = \"hardware/model_identifier\"\n else:\n os_type_search = \"general/os_type\"\n os_type = \"iOS\"\n os_version_search = \"general/os_version\"\n model_search = \"general/model\"\n model_identifier_search = \"general/model_identifier\"\n versions, models = [], []\n\n for device in devices:\n if device.findtext(os_type_search) == os_type:\n versions.append(device.findtext(os_version_search) or\n \"No Version Inventoried\")\n models.append(\"%s / %s\" % (\n device.findtext(model_search) or \"No Model\",\n device.findtext(model_identifier_search,) or\n \"No Model Identifier\"))\n version_counts = Counter(versions)\n # Standardize version number format.\n version_counts = fix_version_counts(version_counts)\n model_counts = Counter(models)\n\n total = len(devices)\n\n # Report on OS version spread\n strings = sorted(get_histogram_strings(version_counts, padding=8))\n version_metadata = {\"%s Version Histogram (%s)\" % (os_type, total):\n strings}\n\n # Report on Model Spread\n # Compare on the model identifier since it is an easy numerical\n # sort.\n strings = sorted(get_histogram_strings(model_counts, padding=8),\n cmp=model_identifier_cmp)\n model_metadata = {\"Hardware Model Histogram (%s)\" % total: strings}\n\n return (version_metadata, model_metadata)", "def _get_ilo_firmware_version(self):\n\n manager, reset_uri = self._get_ilo_details()\n ilo_firmware_version = manager['Firmware']['Current']['VersionString']\n return 
{'ilo_firmware_version': ilo_firmware_version}", "def get_hardware_revision():\n return _pigpio_command(_control, _PI_CMD_HWVER, 0, 0)", "def get_os_version(instance):\n if instance.cloud == 'aws':\n client = boto3.client('ec2', instance.region)\n image_id = client.describe_instances(InstanceIds=[instance.id])['Reservations'][0]['Instances'][0]['ImageId']\n return '16.04' if '16.04' in client.describe_images(ImageIds=[image_id])['Images'][0]['Name'] else '14.04'\n if instance.cloud == 'gcp':\n credentials = GoogleCredentials.get_application_default()\n compute = discovery.build('compute', 'v1', credentials=credentials)\n for disk in compute.instances().get(instance=instance.name,\n zone=instance.zone,\n project=instance.project).execute()['disks']:\n if not disk.get('boot'):\n continue\n for value in disk.get('licenses', []):\n if '1604' in value:\n return '16.04'\n if '1404' in value:\n return '14.04'\n return '14.04'\n return '14.04'", "def _GetSystemVersion(self, component, info):\n # Check if we are on mario, then we need to use the legacy parser\n if self.ChromeOSBoard() == 'x86-mario':\n return self._GetSystemVersionMario(component, info)\n items = info.strip().splitlines()\n # This is going to give us a list of lines, we are looking for the\n # following ones:\n # BIOS version: board.xx.xx.xxx.xxx.xx\n # EC version: foobar\n for line in items:\n line_components = line.split(':')\n # The line we are looking for has at least 2 items\n if len(line_components) >= 2 and line_components[0] == component:\n return line_components[1].strip()\n self.fail('Could not locate the following item %s in the return value '\n 'of chromeos-firmwareupdate.' % component)", "def mac_ver(release='', versioninfo=('', '', ''), machine=''):\n\n # First try reading the information from an XML file which should\n # always be present\n info = _mac_ver_xml()\n if info is not None:\n return info\n\n # If that also doesn't work return the default values\n return release, versioninfo, machine", "def init_linuxVersion(self):\n releaseDic = collections.OrderedDict() # 排序的字典\n releaseDic['/etc/oracle-release'] = self.__getOracleVersion\n releaseDic['/etc/redhat-release'] = self.__getRedhatVersion\n releaseDic['/etc/debian_version'] = self.__getDebianVersion\n releaseDic['/etc/SuSE-release'] = self.__getSuSEVersion\n # for releaseFilePath in releaseDic.keys():\n # print(releaseFilePath)\n #\n # releaseDic = {'/etc/oracle-release': self.__getOracleVersion,\n # '/etc/redhat-release': self.__getRedhatVersion,\n # '/etc/debian_version': self.__getDebianVersion,\n # '/etc/SuSE-release': self.__getSuSEVersion}\n for releaseFilePath in releaseDic.keys():\n ret, resultErr = self.ksp_ssh.ssh_execute_command(\n '[[ -f %s ]] && echo \"exist\" || echo \"not exist\"' % releaseFilePath)\n if 'not' in ret:\n continue\n else:\n return releaseDic.get(releaseFilePath, self.__getNullVersion)()\n return \"unknownVendor\", \"unknownRelease\"", "def __getRedhatVersion(self):\n result, resultErr = self.ksp_ssh.ssh_execute_command('cat /etc/redhat-release')\n if \"Red\" in result:\n linuxVendor = \"RedHat\"\n linuxRelease, resultErr = self.ksp_ssh.ssh_execute_command(\n \"cat /etc/redhat-release | sed 's/^Red Hat Enterprise Linux.* release /EL/' | sed 's/[ .].*//'\")\n elif \"CentOS\" in result:\n linuxVendor = \"CentOS\"\n linuxRelease, resultErr = self.ksp_ssh.ssh_execute_command(\n \"cat /etc/os-release | grep -w \\\"VERSION\\\"| sed 's/VERSION=\\\"/EL/' | sed 's/[ .].*//'\")\n elif \"Cloud\" in result:\n linuxVendor = \"CloudLinux\"\n 
linuxRelease, resultErr = self.ksp_ssh.ssh_execute_command(\n \"cat /etc/redhat-release | sed 's/^CloudLinux.*release //' | sed 's/[ .].*//'\")\n else:\n linuxVendor = \"unknownVendor\"\n linuxRelease = \"unknownRelease\"\n return linuxVendor.strip(), linuxRelease.strip()", "def task_get_info(task):\n logger = logging.getLogger(__name__)\n logger.debug('Get JunOS firmware version')\n result = list()\n out = task.run(task=netmiko_send_command,\n command_string=\"show version\", use_textfsm=True)\n# print_result(out)\n if out.failed:\n for host in out.failed_hosts.keys():\n logger.warning(f'Failed task on device {host}')\n task.inventory.hosts[host]['error'] = True\n for host, res in out.items():\n if not res.failed:\n logger.debug(f'Fill JunOS properties {host}')\n task.inventory.hosts[host]['error'] = False\n# with open('output/qtech_show_version.txt','w+') as f:\n# f.write(r.result)\n result.append(parse_info(host, res.result))\n return result", "def get_os_version(self):\n\t\treturn call_sdk_function('PrlSrvInfo_GetOsVersion', self.handle)", "def version():\n cmd = \"{} -v\".format(_detect_os())\n out = __salt__[\"cmd.run\"](cmd).splitlines()\n ret = out[0].split(\": \")\n return ret[1]", "def get_firmware_version(self):\n fw_version = {\n \"BIOS\": self._api_helper.read_txt_file(BIOS_VER_PATH),\n \"BMC\": self.__get_bmc_ver(),\n \"SWITCH_CPLD1\": self.__get_cpld_ver(SW_CPLD1_VER_PATH),\n \"SWITCH_CPLD2\": self.__get_cpld_ver(SW_CPLD2_VER_PATH),\n }.get(self.name, \"Unknown\")\n\n return fw_version", "def parse_os_info(self):\n pipe = subprocess.Popen([self.core_exe, '-o'], 0, None, None, subprocess.PIPE)\n lines = pipe.stdout.readlines()\n x = 0\n json_str = ''\n while x < len(lines):\n json_str += lines[x].decode('utf-8').strip()\n x += 1\n decoder = json.decoder.JSONDecoder()\n decoder.strict = False\n self.os_info = decoder.decode(json_str)\n return self.os_info", "def installedVersion():\n\n cmd = f'{dcm2niix()} -h'\n versionPattern = re.compile(r'v'\n r'(?P<major>[0-9]+)\\.'\n r'(?P<minor>[0-9]+)\\.'\n r'(?P<year>[0-9]{4})'\n r'(?P<month>[0-9]{2})'\n r'(?P<day>[0-9]{2})')\n\n try:\n output = sp.check_output(cmd.split()).decode()\n output = [l for l in output.split('\\n') if 'version' in l.lower()]\n output = '\\n'.join(output).split()\n\n for word in output:\n\n match = re.match(versionPattern, word)\n\n if match is not None:\n return (int(match.group('major')),\n int(match.group('minor')),\n int(match.group('year')),\n int(match.group('month')),\n int(match.group('day')))\n\n except Exception as e:\n log.debug(f'Error parsing dcm2niix version string: {e}')\n return None", "def get_system_information(self):\n\t\tsys = platform.uname()\n\t\treturn {\n\t\t\t'hostname': sys.node,\n\t\t\t'operating_system': sys.system,\n\t\t\t'version': sys.version,\n\t\t\t'release': sys.release,\n\t\t\t'processor' : sys.processor,\n\t\t\t'processor_type': sys.machine,\n\t\t}", "def read_fw_version(self):\n\n # This function expects the firmware version to be in a line\n # prefixed with 'Product Extra'.\n # At the moment, it takes the form:\n # Product Extra : MCH FW V2.18.8 Final (r14042) (Mar 31 2017 - 11:29)\n # The following two parts will be extracted:\n # mch_fw_ver: V2.18.8 Final\n # mch_fw_date: Mar 31 2017 - 11:29\n # If NAT change the format, then this function will need to be updated\n\n pattern = \".*: MCH FW (.*) \\(.*\\) \\((.*)\\)\"\n\n for mch in range(1,3):\n try:\n result = self.mch_comms.call_ipmitool_command([\"fru\", \"print\", str(mch + MCH_FRU_ID_OFFSET)])\n\n for line in 
result.splitlines():\n if FW_TAG in line:\n match = re.match(pattern, line)\n if match:\n self.mch_fw_ver[mch] = match.group(1)\n self.mch_fw_date[mch] = match.group(2)\n else:\n self.mch_fw_ver[mch] = \"Unknown\"\n self.mch_fw_date[mch] = \"Unknown\"\n except CalledProcessError as e:\n self.mch_fw_ver[mch] = \"Unknown\"\n self.mch_fw_date[mch] = \"Unknown\"\n except TimeoutExpired as e:\n print(\"read_fw_version: caught TimeoutExpired exception: {}\".format(e))", "def get_osversion(self):\n\t\treturn call_sdk_function('PrlFoundVmInfo_GetOSVersion', self.handle)" ]
[ "0.6673707", "0.6660692", "0.6526378", "0.628747", "0.62810564", "0.62695277", "0.62047887", "0.619877", "0.6136058", "0.613013", "0.61185354", "0.61024153", "0.60582775", "0.6051702", "0.5979974", "0.5965632", "0.59549516", "0.59407663", "0.59400725", "0.5934368", "0.5919144", "0.591702", "0.59029275", "0.5858749", "0.58469945", "0.58390516", "0.5832411", "0.58212596", "0.58085746", "0.5805433" ]
0.7456519
0
Given the OS version are you running, what is the highest available build number? Are you running it?
def check_highest_build(self, sys_info, api_results): if not api_results.get("latest_build_number"): self.results[self.current_endpoint]["latest_build_number"] = self.__make_api_get( '/apple/latest_build_number/%s' % (".".join(sys_info.get("os_ver").split(".")[:2]))) self.message("\n\tHighest build number check:") # Validate response from API if self._validate_response(api_results["latest_build_number"]): # Valid response from API - now interpret it if api_results["latest_build_number"][ "msg"] == sys_info.get("build_num"): self.message( "\t\t[+] SUCCESS - You are running the latest build number (%s) of the OS version you have installed (%s)" % (sys_info.get("build_num"), sys_info.get("os_ver"))) elif sys_info.get("build_num")[-1].isalpha(): self.message( "\t\t[!] ATTENTION - It looks like you might be running a development OS build '%s' (%s). The EFIgy API currently only has reliable data for production OS releases." % (sys_info.get("build_num"), sys_info.get("os_ver"))) else: self.message( "\t\t[-] ATTENTION - You are NOT running the latest release build number of your OS version (%s). Your build number is %s, the latest release build number is %s" % (sys_info.get("os_ver"), sys_info.get("build_num"), api_results["latest_build_number"]["msg"]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def version_max():\n return VERSION_MAX", "def get_max_build_version(version: str) -> str:\n return Version(version).bump_minor().get_stable().dumps()", "def get_build_number():\n try:\n return int(os.getenv(*legion.config.BUILD_NUMBER))\n except ValueError:\n raise Exception('Cannot parse build number as integer')", "def python_build():\n return _sys_version()[4:6]", "def get_latest_build(self):\n # Retrieve last sanity-checked build number (could be 0)\n self.get_last_sanity()\n\n # * List all build numbers for this version. Note this may include\n # builds for other versions, since all versions for a given\n # release share a build directory.\n # * Ignore builds above 50000, which are toy builds\n\n builds = [int(x) for x in os.listdir(self.ver_dir)\n if x.isdigit() and int(x) > self.last_bld and int(x) < 50000]\n builds.sort()\n\n # Check each build after last sanity-checked build\n bld_num = self.last_bld\n for build in builds:\n print (\"Checking build \" + str(build))\n if self.check_build(build):\n bld_num = build\n print(\"bld_num is now \" + str(bld_num))\n return bld_num", "def osversion():\n return platform()", "def get_os_release():\n if platform.linux_distribution()[0]:\n return \" \".join(platform.linux_distribution())\n elif platform.mac_ver()[0]:\n return \"%s %s\" % (platform.mac_ver()[0], platform.mac_ver()[2])\n else:\n return \"Unknown\"", "def _get_build_os_name():\n system = platform.system()\n if 'Darwin' in system or 'Macintosh' in system:\n return 'darwin-x86'\n\n # TODO: Add more values if needed.\n return 'linux-x86'", "def last_available_os_version(self) -> str:\n return pulumi.get(self, \"last_available_os_version\")", "def systemversionstr():\n return platform.uname().system", "def _get_version(self):\n version = self.job_config.get(\"os_version\")\n if not version:\n version = DEFAULT_OS_VERSION.get(self.os_type)\n\n return str(version)", "def getOsVersion():\n os_version_tuple = platform.mac_ver()[0].split('.')\n return int(os_version_tuple[1])", "def get_chromeos_version():\r\n try:\r\n get_board_property('CHROMEOS_RELEASE_VERSION')\r\n except:\r\n logging.info(\"CHROMEOS_RELEASE_VERSION not found\")\r\n return -1", "def get_host_os_minor(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetHostOsMinor', self.handle)", "def var_BUILD_OS(self):\n return _get_build_os_name()", "def get_version():\n # this implementation avoids calling Foundation and will work on\n # non Apple OSes.\n vers = \"UNKNOWN\"\n build = \"\"\n # find the munkilib directory, and the version file\n munkilibdir = os.path.dirname(os.path.abspath(__file__))\n versionfile = os.path.join(munkilibdir, \"version.plist\")\n if os.path.exists(versionfile):\n try:\n vers_plist = readPlist(versionfile)\n except (IOError, OSError, ExpatError):\n pass\n else:\n try:\n vers = vers_plist['CFBundleShortVersionString']\n build = vers_plist['BuildNumber']\n except KeyError:\n pass\n if build:\n vers = vers + \".\" + build\n return vers", "def commcare_minor_release(self):\n return '%d.%d' % self.build_spec.minor_release()", "def build_version(self):\n return self.nodes[0].get('infos').get('system_info').get('system_version')", "def latestidd():\n pth, _ = run_functions.install_paths(version='8.8.0') # works with any value in version\n dirpth = os.path.dirname(pth)\n dirpth = os.path.dirname(dirpth)\n alldirs = os.listdir(dirpth)\n eplusdirs = [dir for dir in alldirs if dir.startswith('EnergyPlus')]\n maxapp = max(eplusdirs)\n ver = folder2ver(maxapp)\n return ver", "def getBuild(number):", "def 
getBuild(number):", "def latestidd():\n pth, _ = run_functions.install_paths(\n version=\"8.8.0\"\n ) # works with any value in version\n dirpth = os.path.dirname(pth)\n dirpth = os.path.dirname(dirpth)\n alldirs = os.listdir(dirpth)\n eplusdirs = [dir for dir in alldirs if dir.startswith(\"EnergyPlus\")]\n maxapp = max(eplusdirs)\n ver = folder2ver(maxapp)\n return ver", "def version():\n return uname().version", "def version():\n return uname().version", "def platform_version(self) -> Optional[str]:\n return pulumi.get(self, \"platform_version\")", "def version():\n cmd = \"{} -v\".format(_detect_os())\n out = __salt__[\"cmd.run\"](cmd).splitlines()\n ret = out[0].split(\": \")\n return ret[1]", "def fpga_minor():\n return int, None", "def find_xcode_major_version():\n cmd = ['xcodebuild', '-version']\n command_trace.log(cmd)\n\n result = str(subprocess.check_output(cmd))\n version = result.split('\\n', 1)[0]\n version = re.sub(r'Xcode ', '', version)\n version = re.sub(r'\\..*', '', version)\n return int(version)", "def getversion(): # 3\n res,resargs = _msk.Env.getversion()\n if res != 0:\n raise Error(rescode(res),\"\")\n _major_return_value,_minor_return_value,_build_return_value,_revision_return_value = resargs\n return _major_return_value,_minor_return_value,_build_return_value,_revision_return_value", "def current_platform() -> str:\n if sys.platform.startswith('linux'):\n return 'linux'\n elif sys.platform.startswith('darwin'):\n return 'mac'\n elif (sys.platform.startswith('win') or\n sys.platform.startswith('msys') or\n sys.platform.startswith('cyg')):\n if sys.maxsize > 2 ** 31 - 1:\n return 'win64'\n return 'win32'\n else:\n print('Error: DO NOT SUPPORT OS', file=sys.stderr)\n sys.exit(1)" ]
[ "0.74187696", "0.71662873", "0.7020225", "0.70104754", "0.6995423", "0.6940619", "0.67905074", "0.6742232", "0.66234714", "0.6523554", "0.65176624", "0.6482276", "0.6474209", "0.6407805", "0.6395835", "0.63706833", "0.63454497", "0.6344706", "0.6331214", "0.6328722", "0.6328722", "0.6296596", "0.6291574", "0.6291574", "0.62871385", "0.6262032", "0.6254447", "0.62434715", "0.620778", "0.6196568" ]
0.7350579
1
Preprocess graphs by casting into FloatTensor and setting to cuda if available
def preprocess(dataset, cuda): for g, _ in dataset: for key_g, val_g in g.ndata.items(): processed = g.ndata.pop(key_g) processed = processed.type('torch.FloatTensor') if cuda: processed = processed.cuda() g.ndata[key_g] = processed for key_g, val_g in g.edata.items(): processed = g.edata.pop(key_g) processed = processed.type('torch.FloatTensor') if cuda: processed = processed.cuda() g.edata[key_g] = processed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def to_cuda(network):\n network.cuda()\n\n network._to_cuda_forward_cache = network.forward\n\n def cuda_forward(x):\n return network._to_cuda_forward_cache(x.cuda(non_blocking=True))\n\n network.forward = cuda_forward", "def cuda_if_gpu(T):\n\n return T.cuda() if use_cuda else T", "def __init__(self, model_path, gpu_fraction=1.0,\n input_name = 'input_1:0',\n output_name = 'output_node0:0',\n optimize = True,\n optimizer_args = None):\n\n # Create session first with requested gpu_fraction parameter\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.per_process_gpu_memory_fraction = gpu_fraction\n self.tf_session = tf.compat.v1.Session(config=config)\n\n with tf.io.gfile.GFile(model_path, 'rb') as graph_file:\n # Load graph off of disk into a graph definition\n graph_def = tf.compat.v1.GraphDef()\n graph_def.ParseFromString(graph_file.read())\n\n if optimize:\n if type(output_name) == list:\n sensitive_nodes = output_name\n else:\n sensitive_nodes = [output_name]\n graph_def = optimizeGraph(graph_def,\n sensitive_nodes,\n optimizer_args)\n if type(output_name) == list:\n return_elements = [input_name, *output_name]\n tensors = tf.import_graph_def(\n graph_def,\n return_elements=return_elements)\n # The first is an input\n self.input_tensor = tensors[0]\n # The rest are outputs\n self.output_tensor = tensors[1:]\n else:\n return_elements = [input_name, output_name]\n self.input_tensor, self.output_tensor = tf.import_graph_def(\n graph_def,\n return_elements=return_elements)\n\n self.input_shape = self.input_tensor.get_shape().as_list()", "def trace_cpu(self, graph, tensor_fetches, op_fetches=None):\n if isinstance(graph, func_graph.FuncGraph) or isinstance(\n graph, function._FuncGraph): # pylint: disable=protected-access\n logging.warning('Tensor Tracer is not supported for tracing FuncGraphs. 
'\n 'Ignoring tracing.')\n return tensor_fetches\n\n if graph in TensorTracer._traced_graphs:\n logging.warning('Graph is already rewritten with tensor tracer, ignoring '\n 'multiple calls.')\n return tensor_fetches\n else:\n TensorTracer._traced_graphs.add(graph)\n # Reset the parameters in case parameters are changed.\n self._parameters = tensor_tracer_flags.TTParameters()\n\n self._tt_config.device_type = _DEVICE_TYPE_CPU\n self._tt_config.num_replicas = 1\n self._tt_config.num_replicas_per_host = 1\n self._tt_config.num_hosts = 1\n self._replica_id = 0\n if self._parameters.graph_dump_path:\n graph_io.write_graph(graph, self._parameters.graph_dump_path,\n 'graph_before_tt.pbtxt')\n with graph.as_default():\n tensor_fetches = self._trace_execution(graph, tensor_fetches, op_fetches,\n on_tpu=False)\n if self._parameters.graph_dump_path:\n graph_io.write_graph(graph, self._parameters.graph_dump_path,\n 'graph_after_tt.pbtxt')\n return tensor_fetches", "def set_default_tensor_type(device):\r\n if device in [torch.device(\"cpu\"), \"cpu\"]:\r\n torch.set_default_tensor_type(torch.FloatTensor)\r\n else:\r\n torch.set_default_tensor_type(torch.cuda.FloatTensor)", "def _addCastOps(self, user_graph_def):\n # Load user-specified graph into memory\n user_graph = tf.Graph()\n with user_graph.as_default():\n tf.import_graph_def(user_graph_def, name=\"\")\n\n # Build a subgraph containing our injected ops\n # TODO: Cheap optimization: if all input tensors are of type float64, just do nothing here\n injected_op_subgraph = tf.Graph()\n # Maps names of input tensors in our original graph to outputs of the injected-op subgraph\n input_map = {}\n with injected_op_subgraph.as_default():\n with tf.name_scope(self.SPARKDL_OP_SCOPE):\n for _, orig_tensor_name in self.getInputMapping():\n orig_tensor = tfx.get_tensor(orig_tensor_name, user_graph)\n # Create placeholder with same shape as original input tensor, but that accepts\n # float64 input from Spark.\n spark_placeholder = tf.placeholder(tf.float64, shape=orig_tensor.shape,\n name=tfx.op_name(orig_tensor_name))\n # If the original tensor was of type float64, just pass through the Spark input\n if orig_tensor.dtype == tf.float64:\n input_map[orig_tensor_name] = spark_placeholder\n # Otherwise, cast the Spark input to the datatype of the original tensor\n else:\n input_map[orig_tensor_name] = tf.cast(spark_placeholder,\n dtype=orig_tensor.dtype)\n tf.import_graph_def(graph_def=user_graph_def, input_map=input_map, name=\"\")\n return injected_op_subgraph.as_graph_def(add_shapes=True)", "def local_gpu_lazy_ifelse(node):\r\n if isinstance(node.op, theano.ifelse.IfElse) and not node.op.gpu:\r\n gpu_ifelse = theano.ifelse.IfElse(node.op.n_outs, gpu=True)\r\n outs_clients = reduce(list.__add__,\r\n [out.clients for out in node.outputs])\r\n if any([(i.owner and isinstance(i.owner.op, HostFromGpu))\r\n for i in node.inputs]) or any(\r\n [c != 'output' and c.op == gpu_from_host for c, idx\r\n in outs_clients]):\r\n\r\n c = node.inputs[0]\r\n outs = node.inputs[1:]\r\n # Should not happen, but just in case\r\n if isinstance(c.type, CudaNdarrayType):\r\n c = host_from_gpu(c)\r\n\r\n for i in range(len(outs)):\r\n if not isinstance(outs[i], CudaNdarrayType):\r\n outs[i] = gpu_from_host(outs[i])\r\n return [host_from_gpu(out) for out in\r\n gpu_ifelse.make_node(c, *outs).outputs]\r\n\r\n if isinstance(node.op, GpuFromHost):\r\n host_input = node.inputs[0]\r\n if (host_input.owner and\r\n isinstance(host_input.owner.op, theano.ifelse.IfElse) and\r\n not 
host_input.owner.op.gpu and\r\n # If there is more then 1 outputs, we can't replace it\r\n # here with a local optimizer as we replace the\r\n # GpuFromHost node and the other output of the if won't be\r\n # replaced.\r\n host_input.owner.op.n_outs == 1):\r\n gpu_ifelse = theano.ifelse.IfElse(host_input.owner.op.n_outs,\r\n gpu=True)\r\n\r\n c = host_input.owner.inputs[0]\r\n outs = host_input.owner.inputs[1:]\r\n # Should not happen, but just in case\r\n if isinstance(c.type, CudaNdarrayType):\r\n c = host_from_gpu(c)\r\n\r\n for i in range(len(outs)):\r\n if not isinstance(outs[i], CudaNdarrayType):\r\n outs[i] = gpu_from_host(outs[i])\r\n\r\n outs = gpu_ifelse.make_node(c, *outs).outputs\r\n return outs\r\n\r\n return False", "def cuda(self):\n for i in self.modules:\n if torch.cuda.is_available():\n self.modules[i] = self.modules[i].cuda()", "def local_gpu_conv(node):\r\n def GpuConvOp_from_ConvOp(op):\r\n logical_img_hw = None\r\n\r\n if op.kshp_logical is not None and op.kshp_logical != op.kshp:\r\n return None\r\n #print op.kshp, op.imshp[1:3]\r\n #print op.kshp_logical, logical_img_hw\r\n ret = GpuConv(border_mode=op.out_mode,\r\n subsample=(op.dx, op.dy),\r\n logical_img_hw=logical_img_hw,\r\n logical_kern_hw=op.kshp_logical,\r\n logical_kern_align_top=op.kshp_logical_top_aligned,\r\n kshp=op.kshp,\r\n version=op.version,\r\n verbose=op.verbose,\r\n imshp=op.imshp,\r\n )\r\n if op.imshp_logical is not None:\r\n logical_img_hw = op.imshp_logical[1:3]\r\n if logical_img_hw != op.imshp[1:3]:\r\n # this case is not implemented\r\n #return None\r\n rstride = int(numpy.ceil(op.imshp_logical[1] /\r\n float(op.imshp[1])))\r\n cstride = int(numpy.ceil(op.imshp_logical[2] /\r\n float(op.imshp[2])))\r\n\r\n def make_graph(img, kern):\r\n buf = tensor.alloc(numpy.asarray(0, dtype=img.dtype),\r\n img.shape[0], *op.imshp_logical)\r\n img = tensor.set_subtensor(buf[:, :, ::rstride, ::cstride],\r\n img)\r\n img = gpu_from_host(img)\r\n return ret(img, kern)\r\n\r\n return make_graph\r\n return ret\r\n\r\n def values_eq_approx(a, b):\r\n \"\"\"This fct is needed to don't have DebugMode raise useless\r\n error due to ronding error.\r\n\r\n This happen as We reduce on the two last dimensions, so this\r\n can raise the absolute error if the number of element we\r\n reduce on is significant.\r\n\r\n \"\"\"\r\n assert a.ndim == 4\r\n atol = None\r\n if a.shape[-1] * a.shape[-2] > 100:\r\n #For float32 the default atol is 1e-5\r\n atol = 3e-5\r\n return CudaNdarrayType.values_eq_approx(a, b, atol=atol)\r\n\r\n if isinstance(node.op, GpuFromHost):\r\n #gpu_from_host(conv) -> gpu_conv(gpu_from_host)\r\n host_input = node.inputs[0]\r\n if host_input.owner and isinstance(host_input.owner.op, conv.ConvOp):\r\n gpu_conv = GpuConvOp_from_ConvOp(host_input.owner.op)\r\n if gpu_conv is None:\r\n return\r\n img, kern = host_input.owner.inputs\r\n out = gpu_conv(gpu_from_host(img),\r\n gpu_from_host(kern))\r\n out = tensor.patternbroadcast(out,\r\n node.outputs[0].broadcastable)\r\n out.values_eq_approx = values_eq_approx\r\n # in some case the ConvOp broadcast the last 2 dimensions\r\n # differently then the gpu ConvOp\r\n return [out]\r\n\r\n if isinstance(node.op, conv.ConvOp):\r\n #conv(host_from_gpu) -> host_from_gpu(gpu_conv)\r\n img, kern = node.inputs\r\n img_on_gpu = (img.owner and isinstance(img.owner.op, HostFromGpu))\r\n kern_on_gpu = (kern.owner and isinstance(kern.owner.op, HostFromGpu))\r\n if img_on_gpu or kern_on_gpu:\r\n gpu_conv = GpuConvOp_from_ConvOp(node.op)\r\n if gpu_conv is None:\r\n 
return\r\n out = gpu_conv(gpu_from_host(img),\r\n gpu_from_host(kern))\r\n out = tensor.patternbroadcast(\r\n host_from_gpu(out),\r\n node.outputs[0].broadcastable)\r\n out.values_eq_approx = values_eq_approx\r\n # in some case the ConvOp broadcast the last 2 dimensions\r\n # differently then the gpu ConvOp\r\n return [out]", "def local_gpu_conv(node):\r\n def GpuConvOp_from_ConvOp(op):\r\n logical_img_hw = None\r\n\r\n if op.kshp_logical is not None and op.kshp_logical != op.kshp:\r\n return None\r\n #print op.kshp, op.imshp[1:3]\r\n #print op.kshp_logical, logical_img_hw\r\n ret = GpuConv(border_mode=op.out_mode,\r\n subsample=(op.dx, op.dy),\r\n logical_img_hw=logical_img_hw,\r\n logical_kern_hw=op.kshp_logical,\r\n logical_kern_align_top=op.kshp_logical_top_aligned,\r\n kshp=op.kshp,\r\n version=op.version,\r\n verbose=op.verbose,\r\n imshp=op.imshp,\r\n )\r\n if op.imshp_logical is not None:\r\n logical_img_hw = op.imshp_logical[1:3]\r\n if logical_img_hw != op.imshp[1:3]:\r\n # this case is not implemented\r\n #return None\r\n rstride = int(numpy.ceil(op.imshp_logical[1] /\r\n float(op.imshp[1])))\r\n cstride = int(numpy.ceil(op.imshp_logical[2] /\r\n float(op.imshp[2])))\r\n\r\n def make_graph(img, kern):\r\n buf = tensor.alloc(numpy.asarray(0, dtype=img.dtype),\r\n img.shape[0], *op.imshp_logical)\r\n img = tensor.set_subtensor(buf[:, :, ::rstride, ::cstride],\r\n img)\r\n img = gpu_from_host(img)\r\n return ret(img, kern)\r\n\r\n return make_graph\r\n return ret\r\n\r\n def values_eq_approx(a, b):\r\n \"\"\"This fct is needed to don't have DebugMode raise useless\r\n error due to ronding error.\r\n\r\n This happen as We reduce on the two last dimensions, so this\r\n can raise the absolute error if the number of element we\r\n reduce on is significant.\r\n\r\n \"\"\"\r\n assert a.ndim == 4\r\n atol = None\r\n if a.shape[-1] * a.shape[-2] > 100:\r\n #For float32 the default atol is 1e-5\r\n atol = 3e-5\r\n return GpuArrayType.values_eq_approx(a, b, atol=atol)\r\n\r\n img, kern = node.inputs\r\n gpu_conv = GpuConvOp_from_ConvOp(node.op)\r\n if gpu_conv is None:\r\n return\r\n out = gpu_conv(gpu_from_host(img),\r\n gpu_from_host(kern))\r\n # in some case the ConvOp broadcast the last 2 dimensions\r\n # differently then the gpu ConvOp\r\n out = tensor.patternbroadcast(\r\n host_from_gpu(out),\r\n node.outputs[0].broadcastable)\r\n #op_lifter want the output on the GPU.\r\n out = gpu_from_host(out)\r\n out.values_eq_approx = values_eq_approx\r\n return [out]", "def local_to_gpu(node):\r\n if isinstance(node.op, op):\r\n #op(host_from_gpu()) -> host_from_gpu(op)\r\n #If any of the input that go on the GPU are on the GPU,\r\n #move the op to the gpu.\r\n if any(node.inputs[idx].owner and\r\n isinstance(node.inputs[idx].owner.op, cuda.HostFromGpu)\r\n for idx in to_gpu):\r\n new_inp = list(node.inputs)\r\n for idx in to_gpu:\r\n new_inp[idx] = cuda.gpu_from_host(new_inp[idx])\r\n return [cuda.host_from_gpu(op()(*new_inp))]\r\n if node.op == cuda.gpu_from_host:\r\n #gpu_from_host(op) -> op(gpu_from_host)\r\n host_input = node.inputs[0]\r\n if host_input.owner and isinstance(host_input.owner.op,\r\n op):\r\n op_node = host_input.owner\r\n new_inp = list(op_node.inputs)\r\n for idx in to_gpu:\r\n new_inp[idx] = cuda.gpu_from_host(new_inp[idx])\r\n return [op()(*new_inp)]\r\n return False", "def convert_prelu(g, op, block):\n\n x = g.get_node(op.input(\"X\")[0])\n alpha = g.get_node(op.input(\"Alpha\")[0])\n ndims = len(infer_shape(x))\n axis = 0 if ndims <= 1 else 1\n mode = 
op.attr(\"mode\")\n if mode == \"all\":\n if ndims == 1:\n shape = _op.strided_slice(shape_of(x), [0], [1])\n else:\n shape = _op.strided_slice(shape_of(x), [1], [2])\n alpha = _op.broadcast_to(alpha, fold_constant(shape))\n out = _op.nn.prelu(x, alpha, axis)\n g.add_node(op.output(\"Out\")[0], out)", "def move_variable_initialization_to_cpu(graph=None):\n if not graph:\n graph = ops.get_default_graph()\n\n init_ops = []\n dep_ops = list(\n map(lambda x: x.initializer.inputs[1].op,\n graph.get_collection('variables')))\n visited = set()\n\n while len(dep_ops) > 0:\n op = dep_ops.pop()\n if not op in visited:\n visited.add(op)\n init_ops += [op]\n dep_ops += map(lambda x: x.op, op.inputs)\n\n for op in init_ops:\n op._set_device('/device:CPU:0')\n op._set_attr('_class', attr_value_pb2.AttrValue(s=b'loc:@cpu'))\n op._set_attr('_XlaCompile', attr_value_pb2.AttrValue(b=False))\n op._set_attr('_XlaScope', attr_value_pb2.AttrValue(s=b''))\n\n return", "def process_initializer():\n os.environ['TF_CPP_MIN_LOG_LEVEL'] = '1'\n import tensorflow as tf\n physical_devices = tf.config.experimental.list_physical_devices('GPU')\n if len(physical_devices) > 0:\n os.environ['CUDA_VISIBLE_DEVICES'] = '0'\n tf.config.experimental.set_memory_growth(physical_devices[0], True)", "def to_cuda(*args):\n return [None if x is None else x.cuda() for x in args]", "def __init__(self, model_path, img_width, img_height, gpu_fraction=1.0):\n # Create session first with requested gpu_fraction parameter\n config = tf.compat.v1.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.per_process_gpu_memory_fraction = gpu_fraction\n self.tf_session = tf.compat.v1.Session(config=config)\n\n with tf.io.gfile.GFile(model_path, 'rb') as graph_file:\n # Load graph off of disk into a graph definition\n graph_def = tf.compat.v1.GraphDef()\n graph_def.ParseFromString(graph_file.read())\n self.input_tensor, self.output_tensor = tf.import_graph_def(\n graph_def,\n return_elements=['input_1:0', 'cumsum_values_1:0'])\n\n self.img_width = img_width\n self.img_height = img_height", "def set_default_tensor_type():\n if torch.cuda.is_available():\n torch.set_default_tensor_type('torch.cuda.FloatTensor')\n else:\n print(\"ERROR: cuda is not available. 
Test will exit.\")", "def inference():\n inf_dataset = dataset\n net.eval()\n frames_gen, frame_cnt, rel_props, prop_ticks, prop_scaling = inf_dataset[index]\n \n num_crop = args.test_crops\n length = 3\n if args.modality == 'Flow':\n length = 10\n elif args.modality == 'RGBDiff':\n length = 18\n \n # First get the base_out outputs\n base_output = torch.autograd.Variable(torch.zeros((num_crop, frame_cnt, base_out_dim)).cuda(),\n volatile=True)\n cnt = 0\n for frames in frames_gen:\n # frames.shape == [frame_batch_size * num_crops * 3, 224, 224]\n # frame_batch_size is 4 by default\n input_var = torch.autograd.Variable(frames.view(-1, length, frames.size(-2), frames.size(-1)).cuda(),\n volatile=True)\n base_out = net(input_var, None, None, None, None)\n bsc = base_out.view(num_crop, -1, base_out_dim)\n base_output[:, cnt:cnt+bsc.size(1), :] = bsc\n cnt += bsc.size(1)\n\n n_frames = base_output.size(1)\n assert frame_cnt == n_frames\n # GLCU\n step_features = base_output.mean(dim=0).mean(dim=0).unsqueeze(0)\n gate, glcu_task_pred = net.glcu(step_features)\n glcu_task_pred = F.softmax(glcu_task_pred.squeeze(), dim=0).data.cpu().numpy()\n gate = gate.repeat(1, num_crop * n_frames).view(num_crop, n_frames, base_out_dim)\n if net.additive_glcu:\n base_output = base_output + gate\n else:\n base_output = base_output * gate\n\n # output.shape == [num_frames, 7791]\n output = torch.zeros((frame_cnt, output_dim)).cuda()\n cnt = 0\n for i in range(0, frame_cnt, 4):\n base_out = base_output[:, i:i+4, :].contiguous().view(-1, base_out_dim)\n rst = net.test_fc(base_out)\n sc = rst.data.view(num_crop, -1, output_dim).mean(dim=0)\n output[cnt: cnt + sc.size(0), :] = sc\n cnt += sc.size(0)\n base_output = base_output.mean(dim=0).data\n\n # act_scores.shape == [num_proposals, K+1]\n # comp_scores.shape == [num_proposals, K]\n act_scores, comp_scores, reg_scores = reorg_stpp.forward(output, prop_ticks, prop_scaling)\n act_scores = torch.autograd.Variable(act_scores, volatile=True)\n comp_scores = torch.autograd.Variable(comp_scores, volatile=True)\n\n # Task Head\n combined_scores = F.softmax(act_scores[:, 1:], dim=1) * torch.exp(comp_scores)\n combined_scores = combined_scores.mean(dim=0).unsqueeze(0)\n task_pred = F.softmax(net.task_head(combined_scores).squeeze(), dim=0).data.cpu().numpy()\n\n act_scores = act_scores.data\n comp_scores = comp_scores.data\n\n if reg_scores is not None:\n reg_scores = reg_scores.view(-1, num_class, 2)\n reg_scores[:, :, 0] = reg_scores[:, :, 0] * stats[1, 0] + stats[0, 0]\n reg_scores[:, :, 1] = reg_scores[:, :, 1] * stats[1, 1] + stats[0, 1]\n\n torch.cuda.empty_cache() # To empty the cache from previous iterations\n\n # perform stpp on scores\n return ((inf_dataset.video_list[index].id,\n (rel_props.numpy(), act_scores.cpu().numpy(), comp_scores.cpu().numpy(), reg_scores.cpu().numpy(), \n glcu_task_pred, task_pred),\n output.cpu().numpy(),\n base_output.cpu().numpy()))", "def test_hostfromgpu_shape_i():\r\n pass\r\n\r\n m = mode_with_gpu.including('local_dot_to_dot22',\r\n 'local_dot22_to_dot22scalar','specialize')\r\n a = T.fmatrix('a')\r\n ca = theano.sandbox.cuda.var.CudaNdarrayType((False, False))()\r\n\r\n av = numpy.asarray(numpy.random.rand(5, 4), dtype='float32')\r\n cv = cuda.CudaNdarray(numpy.asarray(numpy.random.rand(5, 4),\r\n dtype='float32'))\r\n\r\n f = theano.function([a], cuda.basic_ops.gpu_from_host(a), mode=m)\r\n assert cuda.basic_ops.gpu_from_host in [x.op\r\n for x in f.maker.fgraph.toposort()]\r\n f = theano.function([a], 
cuda.basic_ops.gpu_from_host(a).shape, mode=m)\r\n topo = f.maker.fgraph.toposort()\r\n assert isinstance(topo[0].op, T.opt.Shape_i)\r\n assert isinstance(topo[1].op, T.opt.Shape_i)\r\n assert isinstance(topo[2].op, T.opt.MakeVector)\r\n assert tuple(f(av)) == (5, 4)\r\n\r\n\r\n\r\n f = theano.function([ca], cuda.basic_ops.host_from_gpu(ca), mode=m)\r\n assert cuda.basic_ops.host_from_gpu in [x.op\r\n for x in f.maker.fgraph.toposort()]\r\n f = theano.function([ca], cuda.basic_ops.host_from_gpu(ca).shape, mode=m)\r\n topo = f.maker.fgraph.toposort()\r\n assert isinstance(topo[0].op, T.opt.Shape_i)\r\n assert isinstance(topo[1].op, T.opt.Shape_i)\r\n assert isinstance(topo[2].op, T.opt.MakeVector)\r\n assert tuple(f(cv)) == (5, 4)", "def _to_cpu(data: Any) -> Any:\n if isinstance(data, (Tensor, BaseDataElement)):\n return data.to('cpu')\n elif isinstance(data, list):\n return [_to_cpu(d) for d in data]\n elif isinstance(data, tuple):\n return tuple(_to_cpu(d) for d in data)\n elif isinstance(data, dict):\n return {k: _to_cpu(v) for k, v in data.items()}\n else:\n return data", "def cuda(self):\n if torch.cuda.is_available():\n self.automata = self.automata.cuda()\n self.inv_automata = self.inv_automata.cuda()\n self.action = self.action.cuda()\n self.inv_action = self.inv_action.cuda()", "def update(self, batch):\n if self.opt['cuda']:\n inputs = [Variable(torch.LongTensor(b).cuda()) for b in batch[:3]]\n subj_start_binary = Variable(torch.LongTensor(batch[5]).cuda()).float()\n subj_end_binary = Variable(torch.LongTensor(batch[6]).cuda()).float()\n obj_start_relation = Variable(torch.LongTensor(batch[7]).cuda())\n obj_end_relation = Variable(torch.LongTensor(batch[8]).cuda())\n subj_start_type = Variable(torch.LongTensor(batch[9]).cuda())\n subj_end_type = Variable(torch.LongTensor(batch[10]).cuda())\n obj_start_type = Variable(torch.LongTensor(batch[11]).cuda())\n obj_end_type = Variable(torch.LongTensor(batch[12]).cuda())\n nearest_subj_start_position_for_each_token = Variable(torch.LongTensor(batch[13]).cuda())\n distance_to_nearest_subj_start = Variable(torch.LongTensor(batch[14]).cuda())\n distance_to_subj = Variable(torch.LongTensor(batch[15]).cuda())\n nearest_obj_start_position_for_each_token = Variable(torch.LongTensor(batch[3]).cuda())\n distance_to_nearest_obj_start = Variable(torch.LongTensor(batch[4]).cuda())\n else:\n inputs = [Variable(torch.LongTensor(b)) for b in batch[:4]]\n subj_start_label = Variable(torch.LongTensor(batch[4])).float()\n subj_end_label = Variable(torch.LongTensor(batch[5])).float()\n obj_start_label = Variable(torch.LongTensor(batch[6]))\n obj_end_label = Variable(torch.LongTensor(batch[7]))\n subj_type_start_label = Variable(torch.LongTensor(batch[8]))\n subj_type_end_label = Variable(torch.LongTensor(batch[9]))\n obj_type_start_label = Variable(torch.LongTensor(batch[10]))\n obj_type_end_label = Variable(torch.LongTensor(batch[11]))\n subj_nearest_start_for_each = Variable(torch.LongTensor(batch[12]))\n subj_distance_to_start = Variable(torch.LongTensor(batch[13]))\n \n \n mask = (inputs[0].data>0).float()\n # step forward\n self.model.train()\n self.optimizer.zero_grad()\n\n \n subj_start_logits, subj_end_logits, obj_start_logits, obj_end_logits = self.model(inputs, distance_to_subj)\n\n subj_start_loss = self.obj_criterion(subj_start_logits.view(-1, self.opt['num_subj_type']+1), subj_start_type.view(-1).squeeze()).view_as(mask)\n subj_start_loss = torch.sum(subj_start_loss.mul(mask.float()))/torch.sum(mask.float())\n \n subj_end_loss = 
self.obj_criterion(subj_end_logits.view(-1, self.opt['num_subj_type']+1), subj_end_type.view(-1).squeeze()).view_as(mask)\n subj_end_loss = torch.sum(subj_end_loss.mul(mask.float()))/torch.sum(mask.float())\n \n obj_start_loss = self.obj_criterion(obj_start_logits.view(-1, self.opt['num_class']+1), obj_start_relation.view(-1).squeeze()).view_as(mask)\n obj_start_loss = torch.sum(obj_start_loss.mul(mask.float()))/torch.sum(mask.float())\n \n obj_end_loss = self.obj_criterion(obj_end_logits.view(-1, self.opt['num_class']+1), obj_end_relation.view(-1).squeeze()).view_as(mask)\n obj_end_loss = torch.sum(obj_end_loss.mul(mask.float()))/torch.sum(mask.float())\n \n loss = self.opt['subj_loss_weight']*(subj_start_loss + subj_end_loss) + (obj_start_loss + obj_end_loss)\n \n # backward\n loss.backward()\n # torch.nn.utils.clip_grad_norm(self.model.parameters(), self.opt['max_grad_norm'])\n self.optimizer.step()\n loss_val = loss.data.item()\n return loss_val", "def fix_graph(graph, model):\n\n def fix_tensor_metadata(tensors, fix_shape=True):\n for tensor in tensors:\n if not tensor.shape and fix_shape:\n tensor.shape = layerwise(model)[tensor.name].shape\n if not tensor.dtype:\n tensor.dtype = layerwise(model)[tensor.name].dtype\n\n fix_tensor_metadata(graph.inputs)\n fix_tensor_metadata(graph.outputs, fix_shape=False)\n\n # If we're marking inputs, there may be cases where some other inputs are required - for\n # example, if the model is branchy. If, after cleanup(), there are any Variable tensors in\n # the graph without inputs, we'll replace them with constants and fold them away.\n tensor_map = graph.tensors()\n needs_const_fold = False\n for tensor in tensor_map.values():\n if isinstance(tensor, gs.Variable) and not tensor.inputs and tensor not in graph.inputs:\n needs_const_fold = True\n G_LOGGER.info(\"Freezing model input: {:}\".format(tensor))\n tensor.to_constant(layerwise(model, include_data=True)[tensor.name])\n\n if needs_const_fold:\n G_LOGGER.info(\"Folding constants to remove extraneous subgraphs\")\n graph.fold_constants().cleanup()\n\n return graph", "def to_cuda(elements):\n if not torch.cuda.is_available():\n return elements\n if isinstance(elements, tuple) or isinstance(elements, list):\n return [x.cuda() for x in elements]\n return elements.cuda()", "def to_cuda(elements):\n if not torch.cuda.is_available():\n return elements\n if isinstance(elements, tuple) or isinstance(elements, list):\n return [x.cuda() for x in elements]\n return elements.cuda()", "def network_inference(self, points):\r\n\r\n # Ensure no gradient is computed\r\n with torch.no_grad():\r\n\r\n #####################\r\n # Input preparation #\r\n #####################\r\n\r\n # t = [time.time()]\r\n\r\n # Create batch from the frame points\r\n batch = OnlineBatch(points, self.config, self.data_handler)\r\n\r\n # t += [time.time()]\r\n\r\n # Convert batch to a cuda\r\n batch.to(self.device)\r\n # t += [time.time()]\r\n torch.cuda.synchronize(self.device)\r\n\r\n #####################\r\n # Network inference #\r\n #####################\r\n\r\n # Forward pass\r\n outputs = self.net(batch, self.config)\r\n torch.cuda.synchronize(self.device)\r\n # t += [time.time()]\r\n\r\n # Get probs and labels\r\n predicted_probs = self.softmax(outputs).cpu().detach().numpy()\r\n torch.cuda.synchronize(self.device)\r\n # t += [time.time()]\r\n\r\n # Insert false columns for ignored labels\r\n for l_ind, label_value in enumerate(self.data_handler.label_values):\r\n if label_value in self.data_handler.ignored_labels:\r\n 
predicted_probs = np.insert(predicted_probs, l_ind, 0, axis=1)\r\n\r\n # Get predicted labels\r\n predictions = self.data_handler.label_values[np.argmax(predicted_probs, axis=1)].astype(np.int32)\r\n # t += [time.time()]\r\n\r\n # print('\\n************************\\n')\r\n # print('Timings:')\r\n # i = 0\r\n # print('Batch ...... {:7.1f} ms'.format(1000*(t[i+1] - t[i])))\r\n # i += 1\r\n # print('ToGPU ...... {:7.1f} ms'.format(1000*(t[i+1] - t[i])))\r\n # i += 1\r\n # print('Forward .... {:7.1f} ms'.format(1000*(t[i+1] - t[i])))\r\n # i += 1\r\n # print('Softmax .... {:7.1f} ms'.format(1000*(t[i+1] - t[i])))\r\n # i += 1\r\n # print('Preds ...... {:7.1f} ms'.format(1000*(t[i+1] - t[i])))\r\n # print('-----------------------')\r\n # print('TOTAL ..... {:7.1f} ms'.format(1000*(t[-1] - t[0])))\r\n # print('\\n************************\\n')\r\n\r\n return predictions, batch.points[0].cpu().numpy()", "def make_cuda(tensor):\n if torch.cuda.is_available():\n tensor = tensor.cuda()\n return tensor", "def make_cuda(tensor):\n if torch.cuda.is_available():\n tensor = tensor.cuda()\n return tensor", "def variables_on_gpu0():\n old_fn = tf.get_variable\n\n def new_fn(*args, **kwargs):\n with tf.device('/gpu:0'):\n return old_fn(*args, **kwargs)\n\n tf.get_variable = new_fn\n yield\n tf.get_variable = old_fn", "def test_no_shared_var_graph():\r\n a=tensor.fmatrix()\r\n b=tensor.fmatrix()\r\n f = theano.function([a,b],[a+b], mode=mode_with_gpu)\r\n l = f.maker.fgraph.toposort()\r\n assert len(l)==4\r\n assert numpy.any(isinstance(x.op,cuda.GpuElemwise) for x in l)\r\n assert numpy.any(isinstance(x.op,cuda.GpuFromHost) for x in l)\r\n assert numpy.any(isinstance(x.op,cuda.HostFromGpu) for x in l)" ]
[ "0.5934677", "0.5921635", "0.58703035", "0.581513", "0.5797795", "0.5776158", "0.57602173", "0.57533664", "0.5746811", "0.5742912", "0.5733459", "0.5706499", "0.56763935", "0.56657827", "0.5658226", "0.5653087", "0.5644797", "0.5627843", "0.55857766", "0.55729073", "0.5570853", "0.55708265", "0.55682164", "0.5534825", "0.5534825", "0.5529216", "0.5528229", "0.5528229", "0.5521497", "0.55093825" ]
0.69795823
0
Plot the languages stored in the dictionaries
def plot_languages(dict_usage_complexities, dict_cognitive_complexity): attested_languages = ( frozenset(['nor', 'and', 'or', 'not']), frozenset(['and', 'or', 'not']), frozenset(['and', 'not']), frozenset(['or', 'not']), ) fig, ax = plt.subplots(figsize=(8.27,4)) for name in dict_usage_complexities.keys(): # if not any([i in ['nc', 'nic', 'bc', 'XOR', 'c', 'ic'] for i in name]) and 'not' in name: if 'not' in name: # if True: usage_complexity = dict_usage_complexities[name] cognitive_complexity = dict_cognitive_complexity[name] if name in attested_languages: color = 'red' zorder = 10 if name == frozenset(['or', 'not']): yshift = 0.4 else: yshift = 0 ax.text( usage_complexity + 0.02, cognitive_complexity + 0.3 + yshift, s=','.join(name), fontsize='x-small' ) else: color='black' zorder = 1 # ax.scatter( # usage_complexity, cognitive_complexity, # color=color, # zorder=zorder # ) # ax.text( # usage_complexity, cognitive_complexity, # s=','.join(name), # fontsize='xx-small', # rotation=90, # color=color # ) ax.scatter(usage_complexity,cognitive_complexity,color=color) ax.set_xlabel('Usage complexity') ax.set_ylabel('Conceptual complexity') # ax.set_xlim(0,3) ax.set_xlim(1.05,2.8) # plt.show() plt.savefig('figure.png', dpi=300, transparent=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def visualize_vecDict(vecDict):\n for url in vecDict:\n plt.plot(vecDict[url])\n plt.legend([key for key in vecDict])\n plt.title(f'Vectors for {len(vecDict)} Documents')\n plt.xlabel('Vector Dimensions')\n plt.ylabel('Document Value')\n plt.show()", "def draw_all_plots(self):\n\n plot_names = []\n e = self.find_di_tri(self.lang_found)\n letter_dct = e[1]\n di_dct = e[2]\n tri_dct = e[3]\n\n plot_name = self.lang_found + '_letters'\n self.wykres(letter_dct, 'Wyres liter', 'litera', plot_name, 0)\n plot_names.append(plot_name)\n plot_name = self.lang_found + '_digram'\n self.wykres(di_dct, 'Wykres digramów', 'digram', plot_name, 1)\n plot_names.append(plot_name)\n plot_name = self.lang_found + '_trigram'\n self.wykres(tri_dct, 'Wykres trigramów', 'trigram', plot_name, 2)\n plot_names.append(plot_name)\n\n for cnt, plt_scn in enumerate(self.plot_scenes):\n pic = QtGui.QPixmap(self.img_dir + '/' + plot_names[cnt] + \".png\")\n plt_scn.setPixmap(pic.scaled(427, 320, Qt.KeepAspectRatio))", "def show_line(dict, xlabel=\"x\", ylabel=\"y\", title=\"title\"):\n plt.clf()\n plt.cla()\n plt.plot(list(dict.keys()), list(dict.values()), alpha=0.4, color = 'g')\n plt.xlabel(xlabel)\n plt.ylabel(ylabel)\n plt.title(title)\n plt.show()", "def plot(self, *args):\n return self.vocab().plot(*args)", "def dispersion_plot(self, words):\n from nltk.draw import dispersion_plot\n\n dispersion_plot(self, words)", "def __plot( dict1, title):\n plt.title(title)\n plt.xlabel(\"Run Time (microsecondsS)\")\n plt.ylabel(\"Number of Items Retreived\")\n plt.ylim(0, max(dict1.values()))\n plt.xlim(min(dict1.keys()), max(dict1.keys()) )\n x1, y1 = zip(*dict1.items())\n\n plt.scatter(x1, y1)\n plt.show()", "def plot_embeddings(M_reduced, word2Ind, words):\n\n # YOUR CODE HERE\n \n for i,type in enumerate(words):\n x_coor,y_coor = M_reduced[word2Ind[type]][0],M_reduced[word2Ind[type]][1]\n \n plt.scatter(x_coor, y_coor, marker='*', color='red')\n plt.text(x_coor+0.05, y_coor+0.05, type, fontsize=12)\n \n plt.show()\n \n #raise NotImplementedError()", "def graph():\n fp = mpl.font_manager.FontProperties(family='JasmineUPC',size=24)\n x = np.arange(0,10)\n y = [386557057065, 368368395622, 242451971944, 225960095934, 161573560379, 107461232731, 89784502211, 73749349545, 54525219632, 52864743212]\n name = ['เชื้อเพลิงที่ได้จากแร่', 'เครื่องจักรและส่วนประกอบ', 'ยานยนต์และส่วนประกอบ', 'เครื่องอุปกรณ์ไฟฟ้าและส่วนประกอบ', 'เหล็กและเหล็กกล้า', 'พลาสติกและของทำด้วยพลาสติก', 'ของทำด้วยเหล็กหรือเหล็กกล้า', 'ทองแดงละของทำด้วยทองแดง', 'เคมีภัณฑ์เบ็ดเตล็ด', 'อุปกรณ์ที่ใช้ทางทัศนศาสตร์']\n ax = plt.gca(xticks=x)\n ax.set_xticklabels(name,rotation=1000,fontproperties=fp)\n plt.bar(x,y,color='g')\n plt.show()", "def get_label_by_language(language):\n\n if language in [\"en4\", \"en5\", \"en15\"]:\n plot_label = \"Supreme Court EN\"\n shade = True\n ls = \"-\"\n c = \"C2\"\n elif language in [\"en2\", \"en8\", \"en18\"]:\n plot_label = \"EuroParl EN\"\n shade = True\n ls = \"-\"\n c = \"C8\"\n\n elif language in [\"de2\", \"de8\", \"de18\"]:\n plot_label = \"EuroParl DE\"\n shade = True\n ls = \"-\"\n c = \"C4\"\n\n elif language in [\"de5\", \"de15\"]:\n plot_label = \"BGH Strafsenat\"\n shade = True\n ls = \"-\"\n c = \"C0\"\n\n elif language in [\"de6\", \"de16\"]:\n plot_label = \"BGH Zivilsenat\"\n shade = True\n ls = \"-\"\n c = \"C1\"\n\n elif language in [\"de7\", \"de17\"]:\n plot_label = \"BGH DE\"\n shade = True\n ls = \"-\"\n\n else:\n plot_label = language\n shade = True\n ls = \"-\"\n c = \"C1\"\n\n if language in [\"de15\", 
\"de16\", \"en15\", \"de17\", \"de18\", \"en18\"]:\n plot_label += \" shuffled\"\n\n return plot_label, shade, ls, c", "def plot_MDS():\n lds = {} #lds is a dictionary of dictionaries: {\"slovenian.txt\": {\"abc\":3,\"efg\":4...}, \"macedonian.txt\":{\"abc\":5,\"efg\":6...},...}\n for fn in listdir(\"clustering\"):\n if fn.lower().endswith(\".txt\"):\n with open(join(\"clustering\", fn), encoding=\"utf8\") as f:\n text = f.read()\n nter = terke(text, n=3)\n lds[fn] = nter\n \n distances={} #a dictionary of dictionaries that saves the distances between a language and all other languages\n \n for x in lds.keys():\n distances[x]={}\n for y in lds.keys():\n if x == y: distances[x][y]=0.0\n else: distances[x][y]=cosine_dist(lds[x],lds[y])\n\n dst=np.zeros([len(lds.keys()), len(lds.keys())])\n i=0\n j=0\n for x in lds.keys():\n j=0\n for y in lds.keys():\n dst[i,j]=distances[x][y]\n j+=1\n i+=1\n\n X, languages = prepare_data_matrix()\n\n transformer = MDS(n_components=2, dissimilarity='precomputed')\n transformed = transformer.fit_transform(dst)\n\n plt.scatter(transformed [:,0], transformed [:,1])\n for i in range(len(transformed)):\n plt.text(transformed[i,0], transformed[i,1], languages[i][:3])\n plt.show()", "def plot_timecourse_language_types(lang_class_prop_over_gen_df, title, file_path, file_name):\n sns.set_style(\"darkgrid\")\n sns.set_context(\"talk\")\n\n fig, ax = plt.subplots()\n\n if len(possible_form_lengths) == 1:\n palette = sns.color_palette([\"black\", \"red\", \"green\", \"grey\"])\n else:\n palette = sns.color_palette([\"black\",\n sns.color_palette(\"colorblind\")[3],\n sns.color_palette(\"colorblind\")[1],\n sns.color_palette(\"colorblind\")[2],\n sns.color_palette(\"colorblind\")[9],\n sns.color_palette(\"colorblind\")[0],\n sns.color_palette(\"colorblind\")[7]])\n\n sns.lineplot(x=\"generation\", y=\"proportion\", hue=\"class\", data=lang_class_prop_over_gen_df, palette=palette)\n # sns.lineplot(x=\"generation\", y=\"proportion\", hue=\"class\", data=lang_class_prop_over_gen_df, palette=palette, ci=95, err_style=\"bars\")\n\n plt.tick_params(axis='both', which='major', labelsize=18)\n plt.tick_params(axis='both', which='minor', labelsize=18)\n plt.ylim(-0.05, 1.05)\n plt.title(title, fontsize=22)\n plt.xlabel('Generation', fontsize=20)\n plt.ylabel('Mean proportion', fontsize=20)\n handles, labels = ax.get_legend_handles_labels()\n\n labels = ['D', 'H', 'H+Div.', 'C', 'C+Red.-part', 'C+Red.-whole', 'O']\n\n # ax.legend(handles=handles[1:], labels=labels[1:])\n ax.legend(handles=handles, labels=labels)\n plt.tight_layout()\n plt.savefig(file_path + \"Timecourse_plot_lang_types_\" + file_name + \".png\")\n plt.show()", "def plot(self):\n fig, ax = plt.subplots()\n ticklabels = [item.strftime('%b %d') for item in self.series.index]\n ax.xaxis.set_major_formatter(ticker.FixedFormatter(ticklabels))\n\n plt.ylabel('#Cases')\n i = 0\n for y in self.countries:\n plt.plot(ticklabels, self.series[y], GRAPH_FORMATS[i], label=y)\n i += 1\n ax.set_xticklabels(ticklabels, rotation='vertical', fontsize=10)\n plt.legend()\n plt.grid()\n if self.log:\n plt.yscale(\"log\")\n plt.show()", "def list(self):\n for key, value in self.languages.iteritems():\n print key, value", "def plot_word_class_pr_genre(df):\n df['nouns'] = df['nouns'] * 100\n df['verbs'] = df['verbs'] * 100\n df['adverbs'] = df['adverbs'] * 100\n # plotting nouns\n plotting_helper_method('nouns', 'genre', df)\n plt.title('Amount of nouns pr song pr. 
genre')\n plt.xlabel(\"Amount of nouns in each song\")\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/nouns_pr_genre_plot')\n\n # plotting verbs\n plotting_helper_method('verbs', 'genre', df)\n plt.title('Amount of verbs pr song pr. genre')\n plt.xlabel('Amount of verbs in each song')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/verbs_pr_genre_plot')\n\n # plotting adverbs\n plotting_helper_method('adverbs', 'genre', df)\n plt.title('Amount of adverbs pr song pr. genre')\n plt.xlabel('Amount of adverbs in each song')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/adverbs_pr_genre_plot')", "def print_languages(config_filepath, label_filepath):\n load_classifier(config_filepath)\n label_filepath = os.path.abspath(label_filepath)\n wili_labels = wili.get_language_data(label_filepath)\n iso2name = dict([(el['ISO 369-3'], el['English'])\n for el in wili_labels])\n print(', '.join(sorted([iso2name[iso]\n for iso in classifier.get_mapping_languages()\n if iso != 'UNK'])))", "def plot_brain_words(brain_scores, plot_order):\n vals = list(zip(*[v.values() for v in brain_scores.values()]))\n labels = Embeddings.get_labels(brain_scores.keys())\n\n def plot_data(ax, data, ord_vocab, ord_name):\n dscores = {'fMRI': 5, 'MEG': 6}[data]\n word_vals = vals[dscores]\n scores = {}\n wordlists = {}\n for label, val in zip(labels, word_vals): # embeddings\n word_dict = {} # Init dictionary with Brain vocab so we have vectors with same length to plot\n for w in ord_vocab:\n word_dict[w] = 0\n for p in val: # participants\n for word_pair in p: # word pairs\n word_dict[word_pair['word1']] += word_pair['hit']\n word_dict[word_pair['word2']] += word_pair['hit']\n # word_dict = dict(((w, word_dict[w]) for w in ord_vocab)) # Sort by concreteness\n scores[label] = list(word_dict.values())\n wordlists[label] = list(word_dict.keys())\n\n # Convert to structured array\n score_arrays = dict2struct_array(scores)\n\n tsuffix = ord_name + ' synset score'\n # Sort by ord_name Embedding\n if ord_name != 'Median' and ord_name != 'Most concrete':\n if ord_name not in labels:\n ord_name = Embeddings.get_label(ord_name)\n tsuffix = 'ordered by ' + ord_name\n score_arrays = np.sort(score_arrays, order=ord_name)\n ord_vocab = [w for w, s in sorted(zip(wordlists[ord_name], scores[ord_name]), key=lambda x: x[1])]\n\n colours, linestyles, alphas = PlotColour.colour_by_modality(labels)\n # allhits = sum([hits for hits in scores.values()], [])\n # ax.set_yticklabels([i for i in range(min(allhits), max(allhits), 6)], rotation=90)\n plot_scores(score_arrays,\n vecs_names=labels,\n labels=None,\n colours=colours,\n linestyles=linestyles,\n title=f'{data} - {tsuffix}',\n alphas=alphas,\n xtick_labels=ord_vocab,\n ax=ax,\n show=False,\n type='scatter',\n swapaxes=True)\n ax.set_xlabel('Hit number')\n ax.yaxis.set_ticks(range(len(ord_vocab)))\n ax.set_yticklabels(ord_vocab, fontsize=14)\n\n if plot_order == 'concreteness':\n # Order by word concreteness\n word_concs = [[w] + list(wn_concreteness(w)) for w in DataSets.fmri_vocab]\n ord_med_vocab = [w for w, cme, cma in sorted(word_concs, key=lambda x: x[1])]\n ord_max_vocab = [w for w, cme, cma in sorted(word_concs, key=lambda x: x[2])]\n\n axs = [i for i in range(4)]\n fig, ((axs[0], axs[1]), (axs[2], axs[3])) = plt.subplots(2, 2, figsize=(20, 15))\n plot_data(axs[0], 'fMRI', ord_med_vocab, 'Median')\n plot_data(axs[1], 'MEG', 
ord_med_vocab, 'Median')\n plot_data(axs[2], 'fMRI', ord_max_vocab, 'Most concrete')\n plot_data(axs[3], 'MEG', ord_max_vocab, 'Most concrete')\n else:\n axs = [i for i in range(2)]\n fig, ((axs[0], axs[1])) = plt.subplots(1, 2, figsize=(20, 13))\n plot_data(axs[0], 'fMRI', DataSets.fmri_vocab, plot_order)\n plot_data(axs[1], 'MEG', DataSets.fmri_vocab, plot_order)\n\n legs, leglabels = PlotColour.get_legend() # Leave out WordNet concreteness [1:]\n fig.legend(legs[1:], leglabels[1:], loc=9, edgecolor='inherit', ncol=7, borderaxespad=-0.2, numpoints=1, fontsize=16)\n fig.tight_layout(pad=1.0)\n\n return fig", "def _plot_dict_scatter(d):\n xvals, yvals = _dict2lists(d)\n pylab.scatter(xvals, yvals)", "def plotRD(country,level,ngrams,timeint,totalgrams,color,ax=None):\n path = prepath+'{}/Level_{}/results_{}grams/{}hour_{}grams_RD.txt'.format(country,level,totalgrams,timeint,ngrams)\n # MODIFICAR SI AGREGAS MAS PAISES <--------------___!!!!!!!!\n dist4country = { \"Mexico\":np.arange(0,11), \"United_Kingdom\":np.arange(0,10),\"Spain\":np.arange(0,9),\"India\":np.arange(0,11),'Argentina':np.arange(0,11)}\n base=2\n distancias = np.power(base, dist4country[country]).astype(float)*3*1000 #definicion de distancias en km.\n data = np.loadtxt(path)\n \n if not ax:\n fig = plt.figure(figsize=(10,7))\n ax = fig.add_subplot(1,1,1)\n ax.set_title(\"{},{}Km,ngrams={},timeint={}\".format(country,distancias[level]/1000,ngrams,timeint))\n ax.grid(which='both')\n\n ax.plot(data[:,0],data[:,1],c=color,label='{},{}Km,$\\delta t$={},ng={}'.format(country,distancias[level]/1000,timeint,ngrams))\n ax.set_xlabel(\"$k$\")\n ax.set_ylabel('d(k)')\n ax.set_xscale('log')", "def get_variables_to_plot(dictionary):\n done = False\n keys = list(dictionary.keys())\n print_variables_found_on_file(keys)\n print(\"\\n\\nWhich variable is the x-axis? (Give the number.)\")\n x_axis = [keys[int(input()) - 1]]\n y_axis = []\n while not done:\n print_variables_found_on_file(keys)\n print(\"\\n\\nWhich variable is the y-axis? 
(Give the number.)\")\n y_axis.append(keys[int(input()) - 1])\n print(\"\\nX-axis: \" + x_axis[0] + \"\\nY-axis: \"+\n \"\".join([i+\", \" for i in y_axis]).strip(\", \"))\n done = not is_yes(input(\"\\nAdd more y-axis values?\\n\"))\n return x_axis+y_axis", "def plot_countryperskill(data_df, **args):\n name = args.get('name', 'VARIABLE NAME')\n idx = args.get('idx', data_df.index.values)\n order = args.get('order', np.array([9, 0, 1, 2, 3, 4, 5, 6, 8, 7], int))\n dd = args.get('dd', .7) # 3.3\n wdth = args.get('wdth', 8) # 7\n hght = args.get('hght', 4)\n markersize = 60\n target_y = args.get('target_y', 1)\n label_y = args.get('label_y', r'$\\rho$')\n colors14 = args.get('colors14', ['#a6cee3', '#1f78b4', '#b2df8a', '#33a02c', \\\n '#fb9a99', '#e31a1c', '#fdbf6f', '#ff7f00', \\\n '#cab2d6', '#6a3d9a', '#ffff99', '#b15928', \\\n '#dd1c77', '#8dd3c7'])\n plt.figure(facecolor='w', figsize=(wdth, hght))\n meth_labels = [r'$Lit$', r'$Lit^2$', r'$Lit^3$', r'$Lit^4$', r'$Lit^5$', \\\n r'$Pop$', r'$Pop^2$', r'$Lit^3Pop$', r'$Lit^2Pop$', r'$LitPop$']\n idx = idx[order]\n meth_labels = [meth_labels[i] for i in order]\n # empty plots for legend handlers:\n for i in np.arange(0, len(countries_sel)): # country\n plt.scatter([], [], marker='o', s=markersize, edgecolor='black', linewidth='.4',\\\n c=colors14[i], label=countries[countries_sel[i]])\n plt.legend()\n\n plt.scatter([0, len(idx)+dd], [0.7, 0.7], marker='.', lw=1, c='white') # legendspace\n\n # actual plotting:\n for i in np.arange(0, len(countries_sel)): # country\n for j in np.arange(0, len(idx)):\n # rp - pearson correlation:\n plt.scatter([j], data_df[countries[countries_sel[i]]][idx[j]], marker='o', \\\n s=markersize, edgecolor='black', linewidth='.4',\\\n alpha=1., c=colors14[i], zorder=j+10)\n if not target_y == 'none':\n plt.plot([0, j], [target_y, target_y], c='#d3d3d3', lw=5, ls='-', zorder=1)\n\n plt.xticks(np.arange(0, len(idx)), meth_labels, color='black', rotation=30)\n plt.grid(axis='y')\n # plt.xlabel('Method')\n plt.ylabel(label_y)\n plt.title(name)\n\n plt.savefig(os.path.join(output_path, experiment_name + '_' + 'allcountries_perScore_v4_' + name + '.pdf'),\\\n dpi=600, facecolor='w', edgecolor='w',\n orientation='portrait', papertype=None, format='pdf',\n transparent=False, bbox_inches=None, pad_inches=0.1,\n frameon=None, metadata=None)\n plt.show()", "def plot_individual_tm(xdict, ydict, xprop, yprop, documents, spline):\n figure_array = {}\n for item in documents:\n xlabel = \"\\\\textbf{\" + label_dict[xprop] + \"}\"\n ylabel = \"\\\\textbf{\" + label_dict[yprop] + \"}\"\n print str(item[\"path_id\"])\n x = xdict[item[\"path_id\"]]\n y = ydict[item[\"path_id\"]]\n # fig_title = item[\"path_id\"] + \"(\" + item[\"pretty_formula\"] + \")\" # Individual traces\n fig_title = yprop + item[\"cation_type\"] # Plot by cation\n figure_array[item[\"path_id\"]] = plt.figure(fig_title, figsize=(6,6), dpi=plotting_dpi)\n ax = figure_array[item[\"path_id\"]].add_subplot(111)\n ax.scatter(x,y, s=70, zorder=2, color=tm_color_dict[item[\"tm_type\"][0]], linewidths=2.5, edgecolors='black',\n label=item[\"tm_type\"][0])\n if spline:\n tck = interpolate.splrep(x, y, s=0)\n xnew = np.arange(0, 100, 0.1)\n splfit = interpolate.splev(xnew, tck, der=0)\n x = xnew\n y = splfit\n if item[\"path_id\"][-3:] == \"002\":\n ax.plot(x, y, linewidth=2.5, zorder=1, color=tm_color_dict[item[\"tm_type\"][0]], linestyle='dashed')\n else:\n ax.plot(x, y, linewidth=2.5, zorder=1, color=tm_color_dict[item[\"tm_type\"][0]])\n ax.set_xlabel(xlabel, 
fontsize=24)\n # ax.set_ylim([0,1200])\n # ax.set_xlim([7,22])\n ax.set_ylabel(ylabel, fontsize=24)\n ax.tick_params(axis='x', labelsize=22)\n ax.tick_params(axis='y', labelsize=22)\n border_width = 2\n [i.set_linewidth(border_width) for i in ax.spines.itervalues()]\n plt.tight_layout()\n plt.legend(loc='best', prop={'size': 14})\n plt.rc('text', usetex=True)\n plt.rc('font', family='sans-serif')\n plt.tight_layout()\n plt.show()", "def make_labels(painting):\n labels = {}\n for dcTitleLang, dcTitle in \\\n painting['object']['proxies'][0]['dcTitle'].iteritems():\n labels[dcTitleLang] = {'language': dcTitleLang, 'value': dcTitle[0]}\n return labels", "def plot_collective(xdict, ydict, xprop, yprop, documents):\n x_ion = {\"Mg\": [], \"Ca\": [], \"Zn\": [], \"Li\": [], \"Na\": []}\n y_ion = {\"Mg\": [], \"Ca\": [], \"Zn\": [], \"Li\": [], \"Na\": []}\n for item in documents:\n if item[\"path_id\"][-3:] == \"001\":\n x_ion[item[\"cation_type\"]].append(xdict[item[\"path_id\"]])\n y_ion[item[\"cation_type\"]].append(ydict[item[\"path_id\"]])\n fig = plt.figure(figsize=(6,6), dpi=plotting_dpi)\n ax = fig.add_subplot(111)\n for ion in [\"Mg\", \"Ca\", \"Zn\", \"Li\", \"Na\"]:\n ax.scatter(x_ion[ion], y_ion[ion], s=70, zorder=2, color=color_dict[ion], linewidths=2.5, edgecolors='black',\n label=ion)\n xlabel = \"\\\\textbf{\" + label_dict[xprop] + \"}\"\n ylabel = \"\\\\textbf{\" + label_dict[yprop] + \"}\"\n \n # # Plot lines for fitting, if useful\n # x2 = np.arange(-700, 3300, 1)\n # ax.plot(x2, x2)\n \n # # For setting axis boundaries\n # ax.set_xlim([-700, 3500])\n # ax.set_ylim([0,100])\n \n # Plot display settings\n ax.set_xlabel(xlabel, fontsize=24)\n ax.set_ylabel(ylabel, fontsize=24)\n ax.tick_params(axis='x', labelsize=22)\n ax.tick_params(axis='y', labelsize=22)\n border_width = 2\n [i.set_linewidth(border_width) for i in ax.spines.itervalues()]\n plt.tight_layout()\n plt.legend(loc='best', prop={'size':10})\n # plt.legend(loc='best')\n plt.rc('text', usetex=True)\n plt.rc('font', family='sans-serif')\n plt.show()", "def visualize_stats(diction, plot_image_name, wordcloud_image_name, plot_title, path):\n\n # sort dictionary by values\n sorted_dict = OrderedDict(sorted(diction.items(), key=lambda t: t[1]))\n # get 20 first key-value pairs of sorted dict\n topdict = dict(list(sorted_dict.items())[-20:])\n\n # make horizontal-bar plots\n sns.set_style(\"darkgrid\")\n ax = plt.figure().gca()\n ax.yaxis.grid(False)\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n plt.barh(range(len(topdict)), list(\n topdict.values()), align='center')\n plt.yticks(range(len(topdict)), list(topdict.keys()))\n plt.xlabel('Frequency')\n plt.title(plot_title)\n # save figure to an image\n plt.savefig(path + plot_image_name, bbox_inches=\"tight\")\n plt.close()\n\n # make word clouds (maximum 100 words)\n wc = WordCloud(width=900, height=600, max_words=100, relative_scaling=1,\n normalize_plurals=False, background_color='white').generate_from_frequencies(diction)\n plt.imshow(wc)\n plt.axis(\"off\")\n # save cloud to an image\n wc.to_file(path + wordcloud_image_name)\n plt.close()", "def plot_topic_words(args, k, word_list):\n\n fig, (axis) = plt.subplots(\n # change nrows and ncols if more plots\n nrows=1, ncols=1, figsize=(18, 9), dpi=80, facecolor=\"w\", edgecolor=\"k\"\n )\n ticks = [str(x) for x in timelist]\n\n tokens = [vocab.index(w) for w in word_list]\n # betas has one weight (?) 
per time slice for every token\n betas = [beta[k, :, x] for x in tokens]\n for i, comp in enumerate(betas):\n # comp is the list of weights for each slice\n axis.plot(\n comp, label=word_list[i], lw=2, linestyle=\"--\", marker=\"o\", markersize=4\n )\n axis.legend(frameon=False, fontsize=14)\n axis.set_xticks(np.arange(T))\n axis.set_xticklabels(timelist, fontsize=14)\n axis.set_title('Topic: ' + str(k), fontsize=20)\n axis.set_xlabel(xlabel, fontsize=16)\n axis.set_ylabel(\"Word Probability\", fontsize=16)\n fig.tight_layout()\n\n # Save plot to subdirectory in results directory\n sub_dir = os.path.join(\"word_evolutions\", os.path.basename(args.beta_file).split(\"_beta.mat\")[0])\n fig_path = os.path.join(os.path.dirname(args.beta_file), sub_dir, str(k) + \"_word_evolution.png\")\n # Make directory if it doesn't exist\n if not os.path.exists(os.path.dirname(fig_path)): os.makedirs(os.path.dirname(fig_path))\n plt.savefig(fig_path)\n plt.close()\n # plt.show()\n print(\"Figure saved to\", fig_path)", "def plot_lev(od):\r\n plt.bar(*zip(*od.items()))\r\n plt.title(\"Levenshtein distance distribution in the pre-pandemic period\")\r\n plt.xlabel(\"Levenshtein distance\")\r\n plt.ylabel(\"Number of pairs of edits\")\r\n plt.savefig('RQ2_lavenshtein_dis(Figure 4.8).png',dpi=500)", "def plot_genre_and_word_count(df):\n plotting_helper_method('word_count', 'genre', df)\n\n plt.title('Word count pr. genre')\n plt.xlabel('Word Count')\n plt.ylabel('Genre')\n plt.legend()\n plt.show()\n # plt.savefig('src/visualization/feature_plots/word_count_plot')", "def plot_wc(diction, filename):\n assert isinstance(diction, dict)\n assert isinstance(filename, str)\n #word_could_dict=Counter(g)\n custom_mask = np.array(Image.open(\"book.png\"))\n wordcloud = WordCloud(background_color=\"white\",\n #mode=\"RGBA\",\n #colormap='Dark2',\n #colormap='RdBu',\n colormap='BrBG',\n collocations=False,\n mask=custom_mask, contour_width=1,\n contour_color='black',\n width=1200, height=1000,\n max_font_size=80,\n scale=3,\n ).generate_from_frequencies(diction)\n #wc = WordCloud(background_color=\"white\", mask=custom_mask)\n #wc = WordCloud(background_color=\"white\", collocations=False, mask=custom_mask, contour_width=1, contour_color='gray')\n plt.imshow(wordcloud, interpolation='bilinear')\n plt.axis(\"off\")\n plt.savefig(\"{}.png\".format(filename))\n plt.show()", "def plot_collective_tm(xdict, ydict, xprop, yprop, documents):\n x_ion = {\"Ti\": [], \"V\": [], \"Fe\": [], \"Cr\": [], \"Mn\": [], \"Co\": [], \"Ni\": [], \"Mo\": []}\n y_ion = {\"Ti\": [], \"V\": [], \"Fe\": [], \"Cr\": [], \"Mn\": [], \"Co\": [], \"Ni\": [], \"Mo\": []}\n for item in documents:\n x_ion[item[\"tm_type\"][0]].append(xdict[item[\"path_id\"]])\n y_ion[item[\"tm_type\"][0]].append(ydict[item[\"path_id\"]])\n fig = plt.figure(figsize=(6,6), dpi=plotting_dpi)\n ax = fig.add_subplot(111)\n for ion in [\"Ti\", \"V\", \"Cr\", \"Mn\", \"Co\", \"Ni\"]:\n ax.scatter(x_ion[ion], y_ion[ion], s=70, zorder=2, color=tm_color_dict[ion], linewidths=2.5, edgecolors='black',\n label=ion)\n xlabel = \"\\\\textbf{\" + label_dict[xprop] + \"}\"\n ylabel = \"\\\\textbf{\" + label_dict[yprop] + \"}\"\n\n # # Plot lines for fitting, if useful\n # x2 = np.arange(-700, 3300, 1)\n # ax.plot(x2, x2)\n\n # # For setting axis boundaries\n # ax.set_xlim([-700, 3500])\n # ax.set_ylim([0,100])\n\n # Plot display settings\n ax.set_xlabel(xlabel, fontsize=24)\n ax.set_ylabel(ylabel, fontsize=24)\n ax.tick_params(axis='x', labelsize=22)\n ax.tick_params(axis='y', 
labelsize=22)\n border_width = 2\n [i.set_linewidth(border_width) for i in ax.spines.itervalues()]\n plt.tight_layout()\n plt.legend(loc='best', prop={'size':10})\n plt.rc('text', usetex=True)\n plt.rc('font', family='sans-serif')\n plt.show()", "def show_wordcloud(dictionary, title, min_font = 10):\n wordcloud = WordCloud(min_font_size=min_font).generate_from_frequencies(dictionary)\n plt.figure(figsize = (8, 8), facecolor = None) \n plt.imshow(wordcloud) \n plt.axis(\"off\")\n if title:\n plt.title(title)\n else:\n plt.title(\"Word Cloud\")\n plt.tight_layout(pad = 0) \n\n plt.show()" ]
[ "0.60507584", "0.5974315", "0.5905134", "0.5861216", "0.58539575", "0.57940316", "0.5775768", "0.5712009", "0.57002044", "0.5681444", "0.56463766", "0.5642815", "0.5637291", "0.5622407", "0.5596009", "0.5593054", "0.5562897", "0.55299807", "0.5506552", "0.5498803", "0.5488566", "0.54884934", "0.5484237", "0.54395294", "0.5429819", "0.5396051", "0.5392924", "0.5341676", "0.53403795", "0.5325545" ]
0.6960431
0
Merge draft invoices. Work only with same partner. You can merge invoices and refund invoices with each other. Moves all lines on the first invoice.
def merge_invoice(self, cr, uid, invoices, context=None): order_ids = [] pick_ids = [] if len(invoices) <= 1: return False parent = self.pool.get('account.invoice').browse(cr, uid, context['active_id']) for inv in invoices: if parent.partner_id != inv.partner_id: raise osv.except_osv(_("Partners don't match!"), _("Can not merge invoice(s) on different partners or states !.")) if inv.state != 'draft': raise osv.except_osv(_("Invalid action !"), _("You can merge only invoices in draft state.")) # Merge invoices that are in draft state inv_line_obj = self.pool.get('account.invoice.line') name = parent.name comment = parent.comment origin = parent.origin for inv in invoices: if inv.id == parent.id: continue # check if a line with the same product already exist. if so add quantity. else hang up invoice line to first invoice head. if inv.name: # Find if the same name already exist, if yes, skip to add. name_list = name.replace(' ', '').split(',') if inv.name not in name_list: name += ', %s' % inv.name if inv.comment: comment = comment and comment + ', %s' % inv.comment or inv.comment if inv.origin: origin += ', %s' % inv.origin line_ids = inv_line_obj.search(cr, uid, [('invoice_id', '=', inv.id)]) for inv_lin in inv_line_obj.browse(cr, uid, line_ids): mrg_pdt_ids = inv_line_obj.search(cr, uid, [('invoice_id', '=', parent.id), ('product_id', '=', inv_lin.product_id.id), ('uos_id', '=', inv_lin.uos_id.id), ('price_unit', '=', inv_lin.price_unit) # kittiu: extra condition, unit price must also be the same. ]) if len(mrg_pdt_ids) == 1 and inv.type == parent.type: # product found --> add quantity inv_line_obj.write(cr, uid, mrg_pdt_ids, {'quantity': inv_line_obj._can_merge_quantity(cr, uid, mrg_pdt_ids[0], inv_lin.id)}) inv_line_obj.unlink(cr, uid, inv_lin.id) elif inv.type == parent.type: inv_line_obj.write(cr, uid, inv_lin.id, {'invoice_id': parent.id}) else: inv_line_obj.write(cr, uid, inv_lin.id, {'invoice_id': parent.id, 'quantity': -inv_lin.quantity}) if inv.sale_order_ids: order_ids += [order.id for order in inv.sale_order_ids] if inv.picking_ids: pick_ids += [picking.id for picking in inv.picking_ids] self.write(cr, uid, parent.id, {'origin': origin, 'name': name, 'comment': comment}) #Remove By DRB #cr.execute('update sale_order_invoice_rel set invoice_id = %s where invoice_id = %s', (parent.id, inv.id)) #cr.execute('update picking_invoice_rel set invoice_id = %s where invoice_id = %s', (parent.id, inv.id)) self.unlink(cr, uid, [inv.id]) #Distinct List order_ids = list(set(order_ids)) pick_ids = list(set(pick_ids)) self.write(cr, uid, parent.id, {'sale_order_ids': [(6, 0, order_ids)], 'picking_ids': [(6, 0, pick_ids)]}) self.button_reset_taxes(cr, uid, [parent.id]) return parent.id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def merge_purchase_invoice(self):\r\n active_id = self.env['purchase.order'].browse(self.env['purchase.order']._context.get('active_ids'))\r\n journal_id = self.env['account.journal'].search([('type', '=', 'purchase')]) \r\n active_id_count = 0\r\n active_count = 0\r\n exist_vendor = []; invoice = [];exist_vendors = [];ctx = ();invoice_id = []\r\n for rec in active_id : \r\n po_reference = self.env['account.invoice'].search([('origin', 'like', rec.name)])\r\n active_count = len(active_id)\r\n if rec.picking_count >= 1 and rec.picking_count != rec.invoice_count:\r\n len_name = [] \r\n for inv in po_reference: \r\n len_name = inv.origin.split(\":\") \r\n if rec.name in len_name:\r\n if po_reference.state == 'draft':\r\n for record in po_reference.invoice_line_ids:\r\n print (record.line_id)\r\n for res in rec.order_line:\r\n if res.id == record.line_id: \r\n record.write({'quantity':res.qty_received})\r\n res.write({'qty_invoiced':record.quantity})\r\n \r\n else:\r\n \r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received \r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':datetime.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n else:\r\n po_list = [];line_values = {};lines = {};purchase = []\r\n if rec.state in 'purchase' and rec.invoice_status in 'to invoice':\r\n purchase.append(rec.id)\r\n active_id_count = len(purchase)\r\n if rec.partner_id.id in exist_vendor:\r\n for inv in invoice:\r\n if inv['partner_id'] == rec.partner_id.id:\r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n 
else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received ,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids) or False] ,\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id ,\r\n 'line_id':recc.id\r\n }) \r\n inv['invoice_line_ids'].append(line_values)\r\n inv['origin'] = inv['origin'] + ':' + rec.name\r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendors.append(rec.partner_id.id) \r\n else: \r\n for recc in rec.order_line:\r\n if rec.picking_count > 1 and rec.invoice_count >= 1:\r\n qty_received = recc.qty_received - recc.qty_invoiced \r\n else:\r\n qty_received = recc.qty_received\r\n line_values = (0, 0, {'product_id': recc.product_id.id,\r\n 'quantity': qty_received,\r\n 'price_unit': recc.price_unit,\r\n 'invoice_line_tax_ids': [(6, 0, recc.taxes_id and recc.taxes_id.ids)or False],\r\n 'price_subtotal': recc.price_subtotal,\r\n 'product_uom': recc.product_uom.id,\r\n 'name': recc.name,\r\n 'account_id': journal_id.default_debit_account_id.id,\r\n 'line_id':recc.id\r\n }) \r\n print (rec.id)\r\n po_list.append(line_values) \r\n invoice.append({'origin':rec.name, 'partner_id': rec.partner_id.id, 'invoice_line_ids':po_list, 'account_id': rec.partner_id.property_account_payable_id.id, 'type': 'in_invoice', 'journal_id':journal_id.id,'date_invoice':date.today()}) \r\n if rec.partner_id.id not in exist_vendor:\r\n exist_vendor.append(rec.partner_id.id) \r\n \r\n invoices = []\r\n invoice_counts = 0\r\n for record in invoice:\r\n invoice_id = self.env['account.invoice'].create(record)\r\n invoices.append(invoice_id.id)\r\n invoice_counts = len(invoices)\r\n if active_id_count == 1:\r\n if invoice_counts == 1:\r\n form_view = self.env.ref('purchase.view_invoice_supplier_purchase_form').id\r\n tree_view = self.env.ref('account.invoice_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'res_id':invoices[0],\r\n 'views_id':False,\r\n 'views':[(form_view , 'form'), (tree_view , 'tree')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n } \r\n else: \r\n form_view = self.env.ref('account.invoice_supplier_form').id\r\n tree_view = self.env.ref('account.invoice_supplier_tree').id \r\n return{\r\n 'name': _('Invoice'),\r\n 'type':'ir.actions.act_window',\r\n 'view_type':'form',\r\n 'view_mode':'form,tree',\r\n 'res_model':'account.invoice',\r\n 'views_id':True,\r\n 'views':[(tree_view , 'tree'), (form_view , 'form')],\r\n 'domain':[('id', 'in', invoices)],\r\n 'target': 'current',\r\n }", "def create_or_find_b2b_invoices_and_process_ept(self, row, sale_order, invoice_date, tax):\n\n vat_number = row.get('Buyer Tax Registration', False)\n invoice_number = row.get('VAT Invoice Number', False)\n\n invoices = sale_order.invoice_ids.filtered(\n lambda x: x.type == 'out_invoice' and x.state != 'cancel')\n if not invoices:\n lines = sale_order.order_line.filtered(lambda line: line.qty_to_invoice > 0)\n if not lines:\n return False\n invoices = sale_order._create_invoices()\n self.write({'invoice_ids': [(4, invoices and invoices.id)]})\n\n for invoice in invoices:\n if not invoice.partner_id.vat:\n invoice.partner_id.vat = vat_number\n\n payments_lines = []\n if invoice.invoice_payments_widget != 'false':\n 
payments_dict = json.loads(invoice.invoice_payments_widget)\n payments_content = payments_dict.get('content', [])\n for line in payments_content:\n payments_lines.append(line.get('payment_id', False))\n\n invoice_line = invoice.mapped('invoice_line_ids').filtered(\\\n lambda line: line.tax_ids != tax)\n if invoice_line:\n invoice.button_draft()\n invoice.write({'ref': invoice_number, 'date': invoice_date})\n\n if len(invoice_line) > 1:\n for line in invoice_line:\n line.with_context({'check_move_validity': False}).write( \\\n {'tax_ids': [(6, 0, [tax.id])]})\n else:\n invoice_line.with_context({'check_move_validity': False}).write( \\\n {'tax_ids': [(6, 0, [tax.id])]})\n\n invoice.with_context({'check_move_validity': False})._recompute_tax_lines( \\\n recompute_tax_base_amount=True)\n invoice.action_post()\n for line in payments_lines:\n invoice.js_assign_outstanding_line(line)\n\n return True", "def action_draft(self):\n context = self._context or {}\n inv_obj = self.env['account.invoice']\n\n brw = self.browse( self.ids[0])\n inv_ids = [i.invoice_id.id for i in brw.line_ids]\n if inv_ids:\n inv_obj.write( {'wh_src_id': False})\n\n return self.write( {'state': 'draft'})", "def do_merge(self, cr, uid, ids, context=None): \n invent_obj = self.pool.get('stock.inventory')\n invent_line_obj = self.pool.get('stock.inventory.line')\n invent_lines = {}\n if context is None:\n context = {}\n for inventory in invent_obj.browse(cr, uid, context['active_ids'], context=context):\n if inventory.state == \"done\":\n raise osv.except_osv(_('Warning!'),\n _('Merging is only allowed on draft inventories.'))\n\n for line in inventory.inventory_line_id:\n key = (line.location_id.id, line.product_id.id, line.product_uom.id)\n if key in invent_lines:\n invent_lines[key] += line.product_qty\n else:\n invent_lines[key] = line.product_qty\n\n\n new_invent = invent_obj.create(cr, uid, {\n 'name': 'Merged inventory'\n }, context=context)\n\n for key, quantity in invent_lines.items():\n invent_line_obj.create(cr, uid, {\n 'inventory_id': new_invent,\n 'location_id': key[0],\n 'product_id': key[1],\n 'product_uom': key[2],\n 'product_qty': quantity,\n })\n\n return {'type': 'ir.actions.act_window_close'}", "def action_invoice_create(self, grouped=False, final=False):\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].sudo().precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n for order in self:\n group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)\n for line in order.order_line.sorted(key=lambda l: l.qty_to_invoice < 0):\n if float_is_zero(line.qty_to_invoice, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.sudo().create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoice['sale_order_id'] = order.id\n elif group_key in invoices:\n vals = {}\n if order.name not in invoices[group_key].origin.split(', '):\n vals['origin'] = invoices[group_key].origin + ', ' + order.name\n if order.client_order_ref and order.client_order_ref not in invoices[group_key].name.split(\n ', ') and order.client_order_ref != invoices[group_key].name:\n vals['name'] = invoices[group_key].name + ', ' + order.client_order_ref\n invoices[group_key].sudo().write(vals)\n if line.qty_to_invoice > 0:\n line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)\n elif line.qty_to_invoice < 0 and final:\n 
line.invoice_line_create(invoices[group_key].id, line.qty_to_invoice)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n if not invoices:\n raise UserError(_('There is no invoiceable line.'))\n for invoice in invoices.values():\n if not invoice.invoice_line_ids:\n raise UserError(_('There is no invoiceable line.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_untaxed < 0:\n invoice.type = 'out_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Use additional field helper function (for account extensions)\n for line in invoice.invoice_line_ids:\n line._set_additional_fields(invoice)\n # Necessary to force computation of taxes. In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def action_invoice_create(self, grouped=False, final=False):\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n invoices_origin = {}\n invoices_name = {}\n for order in self:\n group_key = order.id if grouped else (order.partner_id.id, order.currency_id.id)\n for line in order.order_line.sorted(key=lambda l: l.qty_received - l.qty_invoiced < 0):\n if float_is_zero(line.qty_received - line.qty_invoiced, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoices_origin[group_key] = [invoice.origin]\n invoices_name[group_key] = [invoice.name]\n elif group_key in invoices:\n if order.name not in invoices_origin[group_key]:\n invoices_origin[group_key].append(order.name)\n if order.partner_ref and order.partner_ref not in invoices_name[group_key]:\n invoices_name[group_key].append(order.partner_ref)\n\n if line.qty_received - line.qty_invoiced > 0:\n line.invoice_line_create(invoices[group_key].id, line.qty_received - line.qty_invoiced)\n elif line.qty_received - line.qty_invoiced < 0 and final:\n line.invoice_line_create(invoices[group_key].id, line.qty_received - line.qty_invoiced)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n\n for group_key in invoices:\n invoices[group_key].write({'name': ', '.join(invoices_name[group_key]),\n 'origin': ', '.join(invoices_origin[group_key])})\n\n if not invoices:\n raise UserError(_('There is no invoicable line.'))\n\n for invoice in invoices.values():\n if not invoice.invoice_line_ids:\n raise UserError(_('There is no invoicable line.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_total < 0:\n invoice.type = 'in_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Necessary to force computation of taxes. 
In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def action_invoice_create(self, grouped=False, final=False):\n if self.invoice_option == 'before_delivery':\n inv_obj = self.env['account.invoice']\n for order in self:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n for inv_line in order.order_line:\n inv_line.invoice_line_create(invoice.id, inv_line.product_uom_qty)\n\n else:\n inv_obj = self.env['account.invoice']\n precision = self.env['decimal.precision'].precision_get('Product Unit of Measure')\n invoices = {}\n references = {}\n invoices_origin = {}\n invoices_name = {}\n\n # Keep track of the sequences of the lines\n # To keep lines under their section\n inv_line_sequence = 0\n for order in self:\n group_key = order.id if grouped else (order.partner_invoice_id.id, order.currency_id.id)\n\n # We only want to create sections that have at least one invoiceable line\n pending_section = None\n\n # Create lines in batch to avoid performance problems\n line_vals_list = []\n # sequence is the natural order of order_lines\n for line in order.order_line:\n if line.display_type == 'line_section':\n pending_section = line\n continue\n if float_is_zero(line.qty_to_invoice, precision_digits=precision):\n continue\n if group_key not in invoices:\n inv_data = order._prepare_invoice()\n invoice = inv_obj.create(inv_data)\n references[invoice] = order\n invoices[group_key] = invoice\n invoices_origin[group_key] = [invoice.origin]\n invoices_name[group_key] = [invoice.name]\n elif group_key in invoices:\n if order.name not in invoices_origin[group_key]:\n invoices_origin[group_key].append(order.name)\n if order.client_order_ref and order.client_order_ref not in invoices_name[group_key]:\n invoices_name[group_key].append(order.client_order_ref)\n\n if line.qty_to_invoice > 0 or (line.qty_to_invoice < 0 and final):\n if pending_section:\n section_invoice = pending_section.invoice_line_create_vals(\n invoices[group_key].id,\n pending_section.qty_to_invoice\n )\n inv_line_sequence += 1\n section_invoice[0]['sequence'] = inv_line_sequence\n line_vals_list.extend(section_invoice)\n pending_section = None\n\n inv_line_sequence += 1\n inv_line = line.invoice_line_create_vals(\n invoices[group_key].id, line.qty_to_invoice\n )\n inv_line[0]['sequence'] = inv_line_sequence\n line_vals_list.extend(inv_line)\n\n if references.get(invoices.get(group_key)):\n if order not in references[invoices[group_key]]:\n references[invoices[group_key]] |= order\n\n self.env['account.invoice.line'].create(line_vals_list)\n\n for group_key in invoices:\n invoices[group_key].write({'name': ', '.join(invoices_name[group_key]),\n 'origin': ', '.join(invoices_origin[group_key])})\n sale_orders = references[invoices[group_key]]\n if len(sale_orders) == 1:\n invoices[group_key].reference = sale_orders.reference\n\n if not invoices:\n raise UserError(_(\n 'There is no invoiceable line. If a product has a Delivered quantities invoicing policy, please make sure that a quantity has been delivered.'))\n\n for invoice in invoices.values():\n invoice.compute_taxes()\n if not invoice.invoice_line_ids:\n raise UserError(_(\n 'There is no invoiceable line. 
If a product has a Delivered quantities invoicing policy, please make sure that a quantity has been delivered.'))\n # If invoice is negative, do a refund invoice instead\n if invoice.amount_total < 0:\n invoice.type = 'out_refund'\n for line in invoice.invoice_line_ids:\n line.quantity = -line.quantity\n # Use additional field helper function (for account extensions)\n for line in invoice.invoice_line_ids:\n line._set_additional_fields(invoice)\n # Necessary to force computation of taxes. In account_invoice, they are triggered\n # by onchanges, which are not triggered when doing a create.\n invoice.compute_taxes()\n # Idem for partner\n so_payment_term_id = invoice.payment_term_id.id\n fp_invoice = invoice.fiscal_position_id\n invoice._onchange_partner_id()\n invoice.fiscal_position_id = fp_invoice\n # To keep the payment terms set on the SO\n invoice.payment_term_id = so_payment_term_id\n invoice.message_post_with_view('mail.message_origin_link',\n values={'self': invoice, 'origin': references[invoice]},\n subtype_id=self.env.ref('mail.mt_note').id)\n return [inv.id for inv in invoices.values()]", "def _get_related_invoices(self):\n rslt = super(StockMove, self)._get_related_invoices()\n rslt += self.mapped('picking_id.subcontract_id.invoice_ids').filtered(lambda x: x.state not in ('draft', 'cancel'))\n return rslt", "def create_invoices(self, cr, uid, ids, context=None):\n invoice_list = []\n po_obj = self.pool.get('purchase.order')\n inv_line_obj = self.pool.get('account.invoice.line')\n inv_obj = self.pool.get('account.invoice')\n addr_obj = self.pool.get('res.partner')\n journal_obj = self.pool.get('account.journal')\n if context is None:\n context = {}\n\n for purchase_adv_obj in self.browse(cr, uid, ids, context=context):\n for purchase_order in po_obj.browse(cr, uid, context.get('active_ids', []), context=context):\n inv_line_ids = []\n invoice_ids = []\n val = inv_line_obj.product_id_change(cr, uid, [], purchase_adv_obj.product_id.id,\n uom_id=False, partner_id=purchase_order.partner_id.id, fposition_id=purchase_order.fiscal_position.id)\n line_id = inv_line_obj.create(cr, uid, {\n 'name': val['value']['name'],\n 'account_id': val['value']['account_id'],\n 'price_unit': purchase_adv_obj.amount,\n 'quantity': purchase_adv_obj.qtty,\n 'discount': False,\n 'uos_id': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'invoice_line_tax_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n })\n inv_line_ids.append(line_id)\n addr = addr_obj.address_get(cr, uid, [purchase_order.partner_id.id], ['invoice'])\n journal_ids = journal_obj.search(cr, uid, [('type', '=', 'purchase')])\n context.update({'type':'in_invoice','journal_type':'purchase'})\n inv_vals = {\n 'name': purchase_order.partner_ref or purchase_order.name,\n 'origin': purchase_order.name,\n 'type': 'in_invoice',\n 'reference': False,\n 'account_id': purchase_order.partner_id.property_account_payable.id,\n 'journal_id':journal_ids and journal_ids[0] or False,\n 'partner_id': purchase_order.partner_id.id,\n 'address_invoice_id': addr['invoice'],\n 'invoice_line': [(6, 0, inv_line_ids)],\n 'currency_id': purchase_order.pricelist_id.currency_id.id,\n 'comment': '',\n 'payment_term': purchase_order.payment_term_id and purchase_order.payment_term_id.id or False,\n 'fiscal_position': purchase_order.fiscal_position.id or purchase_order.partner_id.property_account_position.id,\n 'prepaid': True\n }\n\n inv_id = inv_obj.create(cr, uid, inv_vals, context=context)\n inv_obj.button_reset_taxes(cr, uid, [inv_id], 
context=context)\n for invoice in purchase_order.invoice_ids:\n invoice_ids.append(invoice.id)\n invoice_ids.append(inv_id)\n po_obj.write(cr, uid, purchase_order.id, {'invoice_ids': [(6, 0, invoice_ids)]})\n invoice_list.append(inv_id)\n\n if purchase_order.invoice_method in ('picking','order'):\n self.pool.get('purchase.order.line').create(cr, uid, {\n 'order_id': purchase_order.id,\n 'name': val['value']['name'],\n 'date_planned':purchase_order.date_order,\n 'price_unit': -purchase_adv_obj.amount,\n 'product_uom_qty': purchase_adv_obj.qtty,\n 'product_uos': val['value']['uos_id'],\n 'product_uom': val['value']['uos_id'],\n 'product_id': purchase_adv_obj.product_id.id,\n 'adavance_product':True,\n 'discount': False,\n 'taxes_id': [(6, 0, val['value']['invoice_line_tax_id'])],\n }, context=context)\n\n\n context.update({'invoice_id':invoice_list})\n return {\n 'name': 'Open Invoice',\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'purchase.open.invoice',\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n 'context': context\n }", "def action_move_create(self):\n inv_obj = self.env['account.invoice']\n context = dict(self._context or {})\n context.update({'wh_src': True})\n ret = self.browse(self.ids[0])\n for line in ret.line_ids:\n if line.move_id:\n raise exceptions.except_orm(\n _('Invoice already withhold !'),\n _(\"You must omit the follow invoice '%s' !\") %\n (line.invoice_id.number,))\n\n acc_id = ret.account_id.id\n journal_id = ret.journal_id.id\n demo_enabled = self.env['ir.module.module'].search(\n [('name', '=', 'base'), ('demo', '=', True)])\n args = [('id', 'in')]\n if not demo_enabled:\n args.append(('special', '=', False))\n\n if ret.line_ids:\n for line in ret.line_ids:\n writeoff_account_id, writeoff_journal_id = False, False\n amount = line.wh_amount\n if line.invoice_id.type in ['in_invoice', 'in_refund']:\n name = 'COMP. RET. CRS ' + ret.number + ' Doc. ' + (\n line.invoice_id.supplier_invoice_number or '')\n else:\n name = 'COMP. RET. CRS ' + ret.number + ' Doc. 
' + (\n line.invoice_id.number or '')\n # ret_move = inv_obj.ret_and_reconcile(\n # self, [line.invoice_id.id], amount, acc_id,\n # journal_id, writeoff_account_id,\n # writeoff_journal_id, ret.date_ret, name, [line]\n # )\n # rl = {\n # 'move_id': ret_move['move_id'],\n # }\n #lines = [(1, line.id)]\n self.write({'line_ids': line})\n\n if (line.invoice_id.type in [\n 'out_invoice', 'out_refund']):\n inv_obj.write({'wh_src_id': ret.id})\n else:\n return False\n return True", "def recompute_billing_lines(self, cr, uid, ids, partner_id, journal_id, price, currency_id, date, context=None):\n def _remove_noise_in_o2m():\n \"\"\"if the line is partially reconciled, then we must pay attention to display it only once and\n in the good o2m.\n This function returns True if the line is considered as noise and should not be displayed\n \"\"\"\n if line.reconcile_partial_id:\n sign = 1\n if currency_id == line.currency_id.id:\n if line.amount_residual_currency * sign <= 0:\n return True\n else:\n if line.amount_residual * sign <= 0:\n return True\n return False\n\n if context is None:\n context = {}\n billing_date_condition = context.get('billing_date_condition', [])\n context_multi_currency = context.copy()\n if date:\n context_multi_currency.update({'date': date})\n\n currency_pool = self.pool.get('res.currency')\n move_line_pool = self.pool.get('account.move.line')\n partner_pool = self.pool.get('res.partner')\n journal_pool = self.pool.get('account.journal')\n line_pool = self.pool.get('account.billing.line')\n\n #set default values\n default = {\n 'value': {'line_cr_ids': [] },\n }\n\n #drop existing lines\n line_ids = ids and line_pool.search(cr, uid, [('billing_id', '=', ids[0])]) or False\n if line_ids:\n line_pool.unlink(cr, uid, line_ids)\n\n if not partner_id or not journal_id:\n return default\n\n journal = journal_pool.browse(cr, uid, journal_id, context=context)\n partner = partner_pool.browse(cr, uid, partner_id, context=context)\n currency_id = currency_id or journal.company_id.currency_id.id\n account_id = False\n if journal.type in ('sale','sale_refund'):\n account_id = partner.property_account_receivable.id\n elif journal.type in ('purchase', 'purchase_refund','expense'):\n account_id = partner.property_account_payable.id\n else:\n account_id = journal.default_credit_account_id.id or journal.default_debit_account_id.id\n\n default['value']['account_id'] = account_id\n\n if journal.type not in ('cash', 'bank'):\n return default\n\n total_credit = price or 0.0\n account_type = 'receivable'\n\n if not context.get('move_line_ids', False):\n ids = move_line_pool.search(cr, uid, \n [('state','=','valid'), ('account_id.type', '=', account_type), ('reconcile_id', '=', False), ('partner_id', '=', partner_id), \n ] + billing_date_condition, \n context=context)\n else:\n ids = context['move_line_ids']\n invoice_id = context.get('invoice_id', False)\n company_currency = journal.company_id.currency_id.id\n move_line_found = False\n\n #order the lines by most old first\n ids.reverse()\n account_move_lines = move_line_pool.browse(cr, uid, ids, context=context)\n\n #compute the total debit/credit and look for a matching open amount or invoice\n for line in account_move_lines:\n if _remove_noise_in_o2m():\n continue\n\n if invoice_id:\n if line.invoice.id == invoice_id:\n #if the invoice linked to the billing line is equal to the invoice_id in context\n #then we assign the amount on that line, whatever the other billing lines\n move_line_found = line.id\n break\n elif currency_id == 
company_currency:\n #otherwise treatments is the same but with other field names\n if line.amount_residual == price:\n #if the amount residual is equal the amount billing, we assign it to that billing\n #line, whatever the other billing lines\n move_line_found = line.id\n break\n #otherwise we will split the billing amount on each line (by most old first)\n total_credit += line.credit or 0.0\n elif currency_id == line.currency_id.id:\n if line.amount_residual_currency == price:\n move_line_found = line.id\n break\n total_credit += line.credit and line.amount_currency or 0.0\n\n #billing line creation\n for line in account_move_lines:\n\n if _remove_noise_in_o2m():\n continue\n\n if line.currency_id and currency_id==line.currency_id.id:\n amount_original = abs(line.amount_currency)\n amount_unreconciled = abs(line.amount_residual_currency)\n else:\n amount_original = currency_pool.compute(cr, uid, company_currency, currency_id, line.credit or 0.0)\n amount_unreconciled = currency_pool.compute(cr, uid, company_currency, currency_id, abs(line.amount_residual))\n line_currency_id = line.currency_id and line.currency_id.id or company_currency\n rs = {\n 'move_line_id':line.id,\n 'type': line.credit and 'dr' or 'cr',\n 'reference':line.invoice.reference,\n 'account_id':line.account_id.id,\n 'amount_original': amount_original,\n 'amount': (move_line_found == line.id) and min(abs(price), amount_unreconciled) or amount_unreconciled,\n 'date_original':line.date,\n 'date_due':line.date_maturity,\n 'amount_unreconciled': amount_unreconciled,\n 'currency_id': line_currency_id,\n }\n \n # Negate DR records\n if rs['type'] == 'dr':\n rs['amount_original'] = - rs['amount_original']\n rs['amount'] = - rs['amount']\n rs['amount_unreconciled'] = - rs['amount_unreconciled']\n\n if rs['amount_unreconciled'] == rs['amount']:\n rs['reconcile'] = True\n else:\n rs['reconcile'] = False\n\n default['value']['line_cr_ids'].append(rs)\n\n# if ttype == 'payment' and len(default['value']['line_cr_ids']) > 0:\n# default['value']['pre_line'] = 1\n# elif ttype == 'receipt' and len(default['value']['line_dr_ids']) > 0:\n# default['value']['pre_line'] = 1\n default['value']['billing_amount'] = self._compute_billing_amount(cr, uid, default['value']['line_cr_ids'], price)\n return default", "def create_invoice(self):\n for line in self:\n # if not line.account_id:\n # raise UserError(_('Please Add the incoming Account !!'))\n self.ensure_one()\n journal_id = self.env['account.journal'].search([\n ('type', '=', 'sale')], limit=1)\n inv_line_main = {\n 'name': line.description.name,\n 'price_unit': line.amount or 0.00,\n 'quantity': 1,\n 'discount': line.discount,\n 'account_id': line.description.property_account_income_id.id or line.description.categ_id.property_account_income_categ_id.id or False,\n }\n inv_values = {\n 'partner_id': line.patient_id.partner_id.id,\n 'patient_id': line.patient_id.id,\n 'dentist': line.dentist.id,\n 'move_type': 'out_invoice',\n 'invoice_date': datetime.now().strftime(DF) or False,\n 'journal_id': journal_id and journal_id.id or False,\n 'teeth_id': line.patient_id and line.patient_id.id or False,\n }\n acc_id = self.env['account.move'].create(inv_values)\n acc_id.write({'invoice_line_ids': [(0, 0, inv_line_main)]})\n\n self.write({'invc_id': acc_id.id, 'inv': True})\n context = dict(self._context or {})\n wiz_form_id = self.env['ir.model.data'].get_object_reference(\n 'account', 'view_move_form')[1]\n\n return {\n 'view_type': 'form',\n 'view_id': wiz_form_id,\n 'view_mode': 'form',\n 
'res_model': 'account.move',\n 'res_id': self.invc_id.id,\n 'type': 'ir.actions.act_window',\n 'target': 'current',\n 'context': context,\n }", "def action_move_create(self):\n res = super(HrExpenseExpense, self).action_move_create()\n for expense in self:\n if expense.invoice:\n partner = expense.invoice.partner_id.commercial_partner_id\n move_lines = expense.account_move_id.line_ids\n c_move_lines = move_lines.filtered(\n lambda x: x.partner_id == partner and\n x.debit == abs(round(expense.invoice.residual, 2)))\n c_move_lines |= expense.invoice.move_id.line_ids.filtered(\n lambda x: x.account_id == expense.invoice.account_id and\n x.credit == abs(round(expense.invoice.residual, 2)))\n if len(c_move_lines) != 2:\n raise exceptions.Warning(\n _('Cannot reconcile supplier invoice payable with '\n 'generated line. Please check amounts and see '\n 'if the invoice is already added or paid. '\n 'Invoice: %s') % expense.invoice.number)\n c_move_lines.reconcile()\n return res", "def withholding_reconciliation(self):\n\n for inv_brw in self:\n move_ids = [move.id or False\n for move in (inv_brw.move_id, inv_brw.wh_move_id)]\n\n if not all(move_ids):\n continue\n\n line_ids = [line.id\n for move2 in (inv_brw.move_id, inv_brw.wh_move_id)\n for line in move2.line_id\n if line.account_id.id == inv_brw.account_id.id]\n\n if len(line_ids) < 2:\n continue\n\n # /!\\ NOTE: There could be some payments in the invoice let us\n # reconcile them too\n line_ids += [lin2.id for lin2 in inv_brw.payment_ids]\n line_ids = list(set(line_ids))\n\n line_ids = self.env['account.move.line'].browse(line_ids)\n line_ids.reconcile_partial()\n\n return True", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_ids = self.pool.get('account.journal').search(cr, uid,\n [('type', '=', 'sale'), ('company_id', '=', order.company_id.id)],\n limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no sales journal defined for this company: \"%s\" (id:%d)') % (order.company_id.name, order.company_id.id))\n\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_id.property_account_receivable.id,\n 'journal_id': order.partner_id.property_default_sale_invoice_journal.id,\n 'partner_id': order.partner_id.id,\n 'address_invoice_id': order.partner_invoice_id.id,\n #'address_contact_id': order.partner_order_id.id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n\n return invoice_vals", "def _prepare_invoice_grp(self, cr, uid, order, line_ids, context=None):\n if context is None:\n context = {}\n context = dict(context)\n\n inv_data = super(grp_orden_compra, self)._prepare_invoice_grp(cr, uid, order, line_ids, context=context)\n\n # adicionando campos numero compromiso y no obligacion desde la OC\n monto_oc = math.floor(order.total_llavep or 0)\n monto_oc = int(monto_oc)\n 
inv_data.update({'nro_compromiso': order.nro_compromiso or False, 'monto_comprometido': monto_oc or 0, 'currency_id':order.currency_oc.id})\n\n # adicionando campos no afectacion y monto autorizado desde la primera APG\n if order.pc_apg_id:\n first_apg = order.pc_apg_id\n monto_apg = math.floor(first_apg.total_llavep)\n monto_apg = int(monto_apg)\n # TODO R SPRING X ADICIONANDO CABEZALES SIIF A LA FACTURA A PARTIR DE LA APG\n inv_data.update({'nro_afectacion': first_apg.nro_afectacion_siif or False,\n 'monto_afectado': monto_apg or 0,\n 'siif_tipo_ejecucion':first_apg.siif_tipo_ejecucion.id,\n 'siif_concepto_gasto':first_apg.siif_concepto_gasto.id,\n 'siif_financiamiento':first_apg.siif_financiamiento.id,\n 'siif_codigo_sir':first_apg.siif_codigo_sir.id,\n 'siif_nro_fondo_rot':first_apg.siif_nro_fondo_rot.id,\n }) # cambiando nro_afectacion 23/10\n # inv.update({'nro_afectacion': first_apg.nro_afectacion_apg or False, 'monto_afectado': monto_apg or 0})\n\n # # TODO R SPRING X NO LLEVAR LAS LLAVES PRESUPUESTALES POR DEFECTO\n # if order.pc_apg_id.llpapg_ids:\n # llavep_ids = []\n # for llavep in order.pc_apg_id.llpapg_ids:\n # llavep_ids.append((0, 0, {\n # 'programa_id': llavep.programa_id.id,\n # 'odg_id': llavep.odg_id.id,\n # 'auxiliar_id': llavep.auxiliar_id.id,\n # 'disponible': llavep.disponible,\n # 'proyecto_id': llavep.proyecto_id.id,\n # 'fin_id': llavep.fin_id.id,\n # 'mon_id': llavep.mon_id.id,\n # 'tc_id': llavep.tc_id.id,\n # 'importe': llavep.importe\n # }))\n # inv_data.update({'llpapg_ids': llavep_ids})\n\n return inv_data", "def _prepare_invoice(self, cr, uid, order, lines, context=None):\n if context is None:\n context = {}\n journal_id = self.pool['account.invoice'].default_get(cr, uid, ['journal_id'], context=context)['journal_id']\n if not journal_id:\n raise osv.except_osv(_('Error!'),\n _('Please define sales journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n invoice_vals = {\n 'name': order.client_order_ref or '',\n 'origin': order.name,\n 'type': 'out_invoice',\n 'reference': order.client_order_ref or order.name,\n 'account_id': order.partner_invoice_id.property_account_receivable.id,\n 'partner_id': order.partner_invoice_id.id,\n 'journal_id': journal_id,\n 'invoice_line': [(6, 0, lines)],\n 'currency_id': order.pricelist_id.currency_id.id,\n 'comment': order.note,\n 'payment_term': order.payment_term and order.payment_term.id or False,\n 'fiscal_position': order.fiscal_position.id or order.partner_invoice_id.property_account_position.id,\n 'date_invoice': context.get('date_invoice', False),\n 'company_id': order.company_id.id,\n 'user_id': order.user_id and order.user_id.id or False,\n 'section_id' : order.section_id.id,\n 'test_1' :order.test\n }\n\n # Care for deprecated _inv_get() hook - FIXME: to be removed after 6.1\n invoice_vals.update(self._inv_get(cr, uid, order, context=context))\n return invoice_vals", "def duplicate_invoice(invoice):\n from invoicer.models import Invoice\n from invoicer.models import LineItem\n\n # copy main attributes\n new_invoice = Invoice(\n company=invoice.company,\n invoice_date=datetime.now(),\n client=invoice.client,\n location=invoice.location,\n tax_rate=invoice.tax_rate,\n left_address=invoice.left_address,\n right_address=invoice.right_address,\n terms=invoice.terms,\n footer=invoice.footer\n )\n new_invoice.save()\n\n # now line items\n for line_item in invoice.line_items.all():\n new_invoice.line_items.add(LineItem(\n name=line_item.name,\n description=line_item.description,\n 
price=line_item.price,\n taxable=line_item.taxable,\n item=line_item.item,\n quantity=line_item.quantity\n ))\n\n return new_invoice", "def action_cancel_draft(self):\n for statement in self:\n statement_lines = statement.credit_move_line_ids + statement.debit_move_line_ids\n for statement_line in statement_lines:\n if statement_line:\n statement_line.write({'cleared_bank_account': False,\n 'research_required': False\n })\n if statement_line.move_line_id:\n statement_line.move_line_id.write({'cleared_bank_account': False,\n 'bank_acc_rec_statement_id': False,\n })\n if statement_line.move_line_id.move_id:\n statement_line.move_line_id.move_id.write({'is_reconciled': False})\n\n statement.write({'state': 'draft',\n 'verified_by_user_id': False,\n 'verified_date': False\n })\n return True", "def envoi_par_mail(self):\n cr , uid, context = self.env.args\n if not self.pool['res.users'].has_group(cr, uid, 'is_plastigray.is_comptable_group'):\n raise Warning(u\"Accès non autorisé !\")\n ids=[]\n for obj in self:\n ids.append(str(obj.id))\n if len(ids)>0:\n SQL=\"\"\"\n select ai.is_mode_envoi_facture, ai.partner_id, ai.name, ai.id\n from account_invoice ai\n where \n ai.id in(\"\"\"+','.join(ids)+\"\"\") and \n ai.is_date_envoi_mail is null and \n ai.is_mode_envoi_facture like 'mail%'\n order by ai.is_mode_envoi_facture, ai.partner_id, ai.name\n \"\"\"\n cr.execute(SQL)\n result = cr.fetchall()\n\n # ** Un mail par client*********************************************\n partners={}\n for row in result:\n if row[0]=='mail_client':\n partner_id = row[1]\n id = row[3]\n if not partner_id in partners:\n partners[partner_id]=[]\n partners[partner_id].append(id)\n #*******************************************************************\n\n\n # ** Un mail+BL par client******************************************\n for row in result:\n if row[0]=='mail_client_bl':\n partner_id = row[1]\n id = row[3]\n if not partner_id in partners:\n partners[partner_id]=[]\n partners[partner_id].append(id)\n #*******************************************************************\n\n\n #** Envoi des mails par partner ************************************\n for partner_id in partners:\n ids=partners[partner_id]\n self._envoi_par_mail(partner_id, ids)\n #*******************************************************************\n\n\n # ** Un mail par facture *******************************************\n for row in result:\n if row[0] in ['mail', 'mail_regroupe_bl']:\n partner_id = row[1]\n id = row[3]\n self._envoi_par_mail(partner_id, [id])\n #*******************************************************************\n\n\n # ** Un mail par facture en double exemplaire **********************\n for row in result:\n if row[0]=='mail2':\n partner_id = row[1]\n id = row[3]\n self._envoi_par_mail(partner_id, [id])\n #*******************************************************************", "def button_fac_cob_ent(self):\n invoice = self._fac_ent()\n\n # pagar la factura\n # hacer configuracion para modificar esto\n receipt_obj = self.env['account.voucher.receiptbook']\n receipt = receipt_obj.search([('name', 'like', 'Recibos')], limit=1)\n\n journal = self.journal_id\n res = invoice.invoice_pay_customer()\n context = res['context']\n\n account_voucher_obj = self.env['account.voucher']\n voucher = account_voucher_obj.create({\n 'partner_id': context['default_partner_id'],\n 'journal_id': journal.id,\n 'account_id': journal.default_debit_account_id.id,\n 'type': context['type'],\n 'amount': context['default_amount'],\n 'net_amount': 
context['default_amount'],\n 'receiptbook_id': receipt.id,\n 'company_id': self.env.user.company_id.id\n })\n voucher.signal_workflow('proforma_voucher')\n\n account_move_line_obj = self.env['account.move.line']\n\n # obtener un recordser vacio\n lines2rec = account_move_line_obj.browse()\n\n # obtener las lineas a conciliar de facturas\n account_move_line = account_move_line_obj.search(\n [('document_number', '=', invoice.document_number)])\n for re in account_move_line:\n if re.account_id.reconcile:\n lines2rec += re\n\n # obtener las lineas a conciliar de pagos\n account_move_line = account_move_line_obj.search(\n [('document_number', '=', voucher.document_number)])\n for re in account_move_line:\n if re.account_id.reconcile:\n lines2rec += re\n\n period_obj = self.env['account.period']\n period = period_obj.find()\n\n # reconciliar las lineas de factura con pagos\n lines2rec.reconcile('manual',\n journal.default_debit_account_id.id, # writeoff_acc_id\n period.id, # writeoff_period_id,\n journal.id) # writeoff_journal_id)\n\n # imprime factura\n datas = {\n 'ids': invoice.ids,\n 'model': 'account.report_invoice',\n 'form': invoice.read()\n }\n return {\n 'type': 'ir.actions.report.xml',\n 'report_name': 'aeroo_report_ar_einvoice',\n 'datas': datas,\n }", "def action_invoice_create(self, cr, uid, ids, context=None):\n res = False\n\n journal_obj = self.pool.get('account.journal')\n inv_obj = self.pool.get('account.invoice')\n inv_line_obj = self.pool.get('account.invoice.line')\n fiscal_obj = self.pool.get('account.fiscal.position')\n\n for order in self.browse(cr, uid, ids, context=context):\n# pay_acc_id = order.partner_id.property_account_payable.id\n #use a new method to get the account_id\n pay_acc_id = self._get_inv_pay_acc_id(cr,uid,order) \n journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error!'),\n _('Define purchase journal for this company: \"%s\" (id:%d).') % (order.company_id.name, order.company_id.id))\n\n # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line\n inv_lines = []\n for po_line in order.order_line:\n #check if this line have quantity to generate invoice, by johnw\n if po_line.product_qty <= po_line.invoice_qty:\n continue \n# if po_line.product_id:\n# acc_id = po_line.product_id.property_account_expense.id\n# if not acc_id:\n# acc_id = po_line.product_id.categ_id.property_account_expense_categ.id\n# if not acc_id:\n# raise osv.except_osv(_('Error!'), _('Define expense account for this company: \"%s\" (id:%d).') % (po_line.product_id.name, po_line.product_id.id,))\n# else:\n# acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id \n #use a new method to get the account_id, by johnw \n acc_id = self._get_inv_line_exp_acc_id(cr,uid,order,po_line)\n fpos = order.fiscal_position or False\n acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)\n\n inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)\n #update the quantity to the quantity, by johnw\n inv_line_data.update({'quantity':(po_line.product_qty - po_line.invoice_qty)})\n inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)\n inv_lines.append(inv_line_id)\n\n po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)\n \n #if no lines then return direct, by johnw\n if len(inv_lines) == 0:\n continue\n \n # get invoice data and 
create invoice\n inv_data = {\n 'name': order.partner_ref or order.name,\n 'reference': order.partner_ref or order.name,\n 'account_id': pay_acc_id,\n 'type': 'in_invoice',\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'invoice_line': [(6, 0, inv_lines)],\n 'origin': order.name,\n 'fiscal_position': order.fiscal_position.id or False,\n 'payment_term': order.payment_term_id.id or False,\n 'company_id': order.company_id.id,\n }\n inv_id = inv_obj.create(cr, uid, inv_data, context=context)\n\n # compute the invoice\n inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)\n\n # Link this new invoice to related purchase order\n order.write({'invoice_ids': [(4, inv_id)]}, context=context)\n res = inv_id\n return res", "def action_move_create(self, cr, uid, ids, context=None):\n ait_obj = self.pool.get('account.invoice.tax')\n cur_obj = self.pool.get('res.currency')\n period_obj = self.pool.get('account.period')\n payment_term_obj = self.pool.get('account.payment.term')\n journal_obj = self.pool.get('account.journal')\n move_obj = self.pool.get('account.move')\n if context is None:\n context = {}\n for inv in self.browse(cr, uid, ids, context=context):\n if not inv.journal_id.sequence_id:\n raise osv.except_osv(_('Error!'), _('Please define sequence on the journal related to this invoice.'))\n if not inv.invoice_line:\n raise osv.except_osv(_('No Invoice Lines!'), _('Please create some invoice lines.'))\n if inv.move_id:\n continue\n\n ctx = context.copy()\n ctx.update({'lang': inv.partner_id.lang})\n if not inv.date_invoice:\n self.write(cr, uid, [inv.id], {'date_invoice': fields.date.context_today(self,cr,uid,context=context)}, context=ctx)\n company_currency = self.pool['res.company'].browse(cr, uid, inv.company_id.id).currency_id.id\n # create the analytical lines\n # one move line per invoice line\n iml = self._get_analytic_lines(cr, uid, inv.id, context=ctx)\n # check if taxes are all computed\n compute_taxes = ait_obj.compute(cr, uid, inv.id, context=ctx)\n self.check_tax_lines(cr, uid, inv, compute_taxes, ait_obj)\n\n # I disabled the check_total feature\n group_check_total_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'group_supplier_inv_check_total')[1]\n group_check_total = self.pool.get('res.groups').browse(cr, uid, group_check_total_id, context=context)\n if group_check_total and uid in [x.id for x in group_check_total.users]:\n if (inv.type in ('in_invoice', 'in_refund') and abs(inv.check_total - inv.amount_total) >= (inv.currency_id.rounding/2.0)):\n raise osv.except_osv(_('Bad Total!'), _('Please verify the price of the invoice!\\nThe encoded total does not match the computed total.'))\n\n if inv.payment_term:\n total_fixed = total_percent = 0\n for line in inv.payment_term.line_ids:\n if line.value == 'fixed':\n total_fixed += line.value_amount\n if line.value == 'procent':\n total_percent += line.value_amount\n total_fixed = (total_fixed * 100) / (inv.amount_total or 1.0)\n if (total_fixed + total_percent) > 100:\n raise osv.except_osv(_('Error!'), _(\"Cannot create the invoice.\\nThe related payment term is probably misconfigured as it gives a computed amount greater than the total invoiced amount. 
In order to avoid rounding issues, the latest line of your payment term must be of type 'balance'.\"))\n\n # one move line per tax line\n iml += ait_obj.move_line_get(cr, uid, inv.id)\n\n entry_type = ''\n if inv.type in ('in_invoice', 'in_refund'):\n ref = inv.reference\n entry_type = 'journal_pur_voucher'\n if inv.type == 'in_refund':\n entry_type = 'cont_voucher'\n else:\n ref = self._convert_ref(cr, uid, inv.number)\n entry_type = 'journal_sale_vou'\n if inv.type == 'out_refund':\n entry_type = 'cont_voucher'\n\n diff_currency_p = inv.currency_id.id <> company_currency\n # create one move line for the total and possibly adjust the other lines amount\n total = 0\n total_currency = 0\n total, total_currency, iml = self.compute_invoice_totals(cr, uid, inv, company_currency, ref, iml, context=ctx)\n acc_id = inv.account_id.id\n\n name = inv['name'] or inv['supplier_invoice_number'] or '/'\n totlines = False\n # kittiu\n #if inv.payment_term:\n if inv.payment_term and not inv.date_due:\n # --\n totlines = payment_term_obj.compute(cr,\n uid, inv.payment_term.id, total, inv.date_invoice or False, context=ctx)\n if totlines:\n res_amount_currency = total_currency\n i = 0\n ctx.update({'date': inv.date_invoice})\n for t in totlines:\n if inv.currency_id.id != company_currency:\n amount_currency = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, t[1], context=ctx)\n else:\n amount_currency = False\n\n # last line add the diff\n res_amount_currency -= amount_currency or 0\n i += 1\n if i == len(totlines):\n amount_currency += res_amount_currency\n\n iml.append({\n 'type': 'dest',\n 'name': name,\n 'price': t[1],\n 'account_id': acc_id,\n 'date_maturity': t[0],\n 'amount_currency': diff_currency_p \\\n and amount_currency or False,\n 'currency_id': diff_currency_p \\\n and inv.currency_id.id or False,\n 'ref': ref,\n })\n else:\n iml.append({\n 'type': 'dest',\n 'name': name,\n 'price': total,\n 'account_id': acc_id,\n 'date_maturity': inv.date_due or False,\n 'amount_currency': diff_currency_p \\\n and total_currency or False,\n 'currency_id': diff_currency_p \\\n and inv.currency_id.id or False,\n 'ref': ref\n })\n\n date = inv.date_invoice or time.strftime('%Y-%m-%d')\n\n part = self.pool.get(\"res.partner\")._find_accounting_partner(inv.partner_id)\n\n line = map(lambda x:(0,0,self.line_get_convert(cr, uid, x, part.id, date, context=ctx)),iml)\n\n line = self.group_lines(cr, uid, iml, line, inv)\n\n journal_id = inv.journal_id.id\n journal = journal_obj.browse(cr, uid, journal_id, context=ctx)\n if journal.centralisation:\n raise osv.except_osv(_('User Error!'),\n _('You cannot create an invoice on a centralized journal. 
Uncheck the centralized counterpart box in the related journal from the configuration menu.'))\n\n line = self.finalize_invoice_move_lines(cr, uid, inv, line)\n\n move = {\n 'ref': inv.reference and inv.reference or inv.name,\n 'line_id': line,\n 'journal_id': journal_id,\n 'date': date,\n 'narration': inv.comment,\n 'company_id': inv.company_id.id,\n }\n period_id = inv.period_id and inv.period_id.id or False\n ctx.update(company_id=inv.company_id.id,\n account_period_prefer_normal=True)\n if not period_id:\n period_ids = period_obj.find(cr, uid, inv.date_invoice, context=ctx)\n period_id = period_ids and period_ids[0] or False\n if period_id:\n move['period_id'] = period_id\n for i in line:\n i[2]['period_id'] = period_id\n\n ctx.update(invoice=inv)\n move_id = move_obj.create(cr, uid, move, context=ctx)\n new_move_name = move_obj.browse(cr, uid, move_id, context=ctx).name\n # make the invoice point to that move\n self.write(cr, uid, [inv.id], {'move_id': move_id,'period_id':period_id, 'move_name':new_move_name}, context=ctx)\n # Pass invoice in context in method post: used if you want to get the same\n # account move reference when creating the same invoice after a cancelled one:\n move_obj.post(cr, uid, [move_id], context=ctx)\n self._log_event(cr, uid, ids)\n return True", "def action_invoice_create(self, cr, uid, ids, grouped=False, states=None, date_invoice=False, context=None):\n order = self.browse(cr, uid, ids[0], context=context)\n inv_obj = self.pool.get('account.invoice')\n # create the invoice\n inv_id = super(sale_order, self).action_invoice_create(cr, uid, ids, grouped, states, date_invoice, context=context)\n # modify the invoice\n inv_obj.write(cr, uid, [inv_id], {'past_doc': order.past_doc})\n return inv_id", "def action_move_create(self, cr, uid, ids, context=None):\n ait_obj = self.pool.get('account.invoice.tax')\n cur_obj = self.pool.get('res.currency')\n period_obj = self.pool.get('account.period')\n payment_term_obj = self.pool.get('account.payment.term')\n journal_obj = self.pool.get('account.journal')\n move_obj = self.pool.get('account.move')\n if context is None:\n context = {}\n for inv in self.browse(cr, uid, ids, context=context):\n if not inv.journal_id:\n raise orm.except_orm(_('Error!'),\n _('Journal not defined for this invoice!'))\n if not inv.journal_id.iva_registry_id:\n raise orm.except_orm(_('Error!'),\n _('You must link %s with a VAT registry!') % (inv.journal_id.name))\n if not inv.journal_id.sequence_id:\n raise orm.except_orm(_('Error!'),\n _('Please define sequence on the journal related to this invoice.')) \n if not inv.invoice_line:\n raise orm.except_orm(_('No Invoice Lines!'),\n _('Please create some invoice lines.'))\n if inv.move_id:\n continue\n\n ctx = context.copy()\n ctx.update({'lang': inv.partner_id.lang})\n if not inv.date_invoice:\n self.write(cr, uid, [inv.id],\n {'date_invoice': fields.date.context_today(self,\n cr,\n uid,\n context=context)},\n context=ctx)\n company_currency = self.pool['res.company'].browse(cr, uid,\n inv.company_id.id).currency_id.id\n # create the analytical lines\n # one move line per invoice line\n # iml = self._get_analytic_lines(cr, uid, inv.id, context=ctx)\n iml = super(account_invoice_makeover, self)._get_analytic_lines(cr, uid, inv.id, context=ctx)\n # check if taxes are all computed\n compute_taxes = ait_obj.compute(cr, uid, inv.id, context=ctx)\n # self.check_tax_lines(cr, uid, inv, compute_taxes, ait_obj)\n super(account_invoice_makeover, self).check_tax_lines(cr, uid, inv, compute_taxes, 
ait_obj)\n\n # I disabled the check_total feature\n group_check_total_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'group_supplier_inv_check_total')[1]\n group_check_total = self.pool.get('res.groups').browse(cr, uid,\n group_check_total_id,\n context=context)\n if group_check_total and uid in [x.id for x in group_check_total.users]:\n if (inv.type in ('in_invoice', 'in_refund') and abs(inv.check_total - inv.amount_total) >= (inv.currency_id.rounding / 2.0)):\n raise orm.except_orm(_('Bad Total!'), _('Please verify the price of the invoice!\\nThe encoded total does not match the computed total.'))\n\n if inv.payment_term:\n total_fixed = total_percent = 0\n for line in inv.payment_term.line_ids:\n if line.value == 'fixed':\n total_fixed += line.value_amount\n if line.value == 'procent':\n total_percent += line.value_amount\n total_fixed = (total_fixed * 100) / (inv.amount_total or 1.0)\n if (total_fixed + total_percent) > 100:\n raise orm.except_orm(_('Error!'), _(\"Cannot create the invoice.\\nThe related payment term is probably misconfigured as it gives a computed amount greater than the total invoiced amount. In order to avoid rounding issues, the latest line of your payment term must be of type 'balance'.\"))\n\n # one move line per tax line\n iml += ait_obj.move_line_get(cr, uid, inv.id)\n\n# entry_type = ''\n if inv.type in ('in_invoice', 'in_refund'):\n ref = inv.reference\n# entry_type = 'journal_pur_voucher'\n# if inv.type == 'in_refund':\n# entry_type = 'cont_voucher'\n else:\n # ref = self._convert_ref(cr, uid, inv.number)\n ref = super(account_invoice_makeover, self)._convert_ref(cr, uid, inv.number)\n# entry_type = 'journal_sale_vou'\n# if inv.type == 'out_refund':\n# entry_type = 'cont_voucher'\n\n diff_currency_p = inv.currency_id.id <> company_currency\n # create one move line for the total and possibly adjust the other lines amount\n total = 0\n total_currency = 0\n # total, total_currency, iml = self.compute_invoice_totals(cr, uid, inv, company_currency, ref, iml, context=ctx)\n total, total_currency, iml = super(account_invoice_makeover, self).compute_invoice_totals(cr, uid, inv, company_currency, ref, iml, context=ctx)\n acc_id = inv.account_id.id\n\n name = inv['name'] or inv['supplier_invoice_number'] or '/'\n totlines = False\n if inv.payment_term:\n totlines = payment_term_obj.compute(cr,\n uid, inv.payment_term.id, total, inv.date_invoice or False, context=ctx)\n if totlines:\n res_amount_currency = total_currency\n i = 0\n ctx.update({'date': inv.date_invoice})\n for t_line in totlines:\n if inv.currency_id.id != company_currency:\n amount_currency = cur_obj.compute(cr, uid, company_currency, inv.currency_id.id, t_line[1], context=ctx)\n else:\n amount_currency = False\n\n # last line add the diff\n res_amount_currency -= amount_currency or 0\n i += 1\n if i == len(totlines):\n amount_currency += res_amount_currency\n\n iml.append({\n 'type': 'dest',\n 'name': name,\n 'price': t_line[1],\n 'account_id': acc_id,\n 'date_maturity': t_line[0],\n 'amount_currency': diff_currency_p \\\n and amount_currency or False,\n 'currency_id': diff_currency_p \\\n and inv.currency_id.id or False,\n 'ref': ref,\n 'payment_type': t_line[2]\n })\n else:\n iml.append({\n 'type': 'dest',\n 'name': name,\n 'price': total,\n 'account_id': acc_id,\n 'date_maturity': inv.date_due or False,\n 'amount_currency': diff_currency_p \\\n and total_currency or False,\n 'currency_id': diff_currency_p \\\n and inv.currency_id.id or False,\n 'ref': ref,\n 
'payment_type': None\n })\n\n date = inv.date_invoice or time.strftime('%Y-%m-%d')\n\n part = self.pool.get(\"res.partner\")._find_accounting_partner(inv.partner_id)\n\n line = map(lambda x:(0, 0, self.line_get_convert(cr, uid, x, part.id, date, context=ctx)), iml)\n\n # line = self.group_lines(cr, uid, iml, line, inv)\n line = super(account_invoice_makeover, self).group_lines(cr, uid, iml, line, inv)\n\n journal_id = inv.journal_id.id\n journal = journal_obj.browse(cr, uid, journal_id, context=ctx)\n if journal.centralisation:\n raise orm.except_orm(_('User Error!'),\n _('You cannot create an invoice on a centralized journal. Uncheck the centralized counterpart box in the related journal from the configuration menu.'))\n\n line = self.finalize_invoice_move_lines(cr, uid, inv, line)\n\n move = {\n 'ref': inv.reference and inv.reference or inv.name,\n 'line_id': line,\n 'journal_id': journal_id,\n 'date': date,\n 'narration': inv.comment,\n 'company_id': inv.company_id.id,\n }\n period_id = inv.period_id and inv.period_id.id or False\n ctx.update(company_id=inv.company_id.id,\n account_period_prefer_normal=True)\n if not period_id:\n period_ids = period_obj.find(cr, uid, inv.registration_date, context=ctx)\n period_id = period_ids and period_ids[0] or False\n if period_id:\n move['period_id'] = period_id\n for i in line:\n i[2]['period_id'] = period_id\n\n ctx.update(invoice=inv)\n move_id = move_obj.create(cr, uid, move, context=ctx)\n new_move_name = move_obj.browse(cr, uid, move_id, context=ctx).name\n # make the invoice point to that move\n self.write(cr, uid, [inv.id], {'move_id': move_id, 'period_id':period_id, 'move_name':new_move_name}, context=ctx)\n # Pass invoice in context in method post: used if you want to get the same\n # account move reference when creating the same invoice after a cancelled one:\n move_obj.post(cr, uid, [move_id], context=ctx)\n # self._log_event(cr, uid, ids)\n super(account_invoice_makeover, self)._log_event(cr, uid, ids)\n return True", "def _create_invoice(self):\n self.ensure_one()\n partner = self.member_id.partner_id\n invoice = self.env['account.invoice'].create({\n 'partner_id': partner.id,\n 'account_id': partner.property_account_receivable_id.id,\n 'fiscal_position_id': partner.property_account_position_id.id\n })\n for line in self.line_ids:\n product = line.activity_id.product_id\n # Handling of invoice lines : needs cache record for onchange, then\n # real writing...\n invoice_line = self.env['account.invoice.line'].new({\n 'product_id': product.id,\n 'invoice_id': invoice.id\n })\n invoice_line._onchange_product_id()\n line_values = dict(invoice_line._cache)\n line_values['price_unit'] = line.price\n invoice_line = self.env['account.invoice.line'].create(line_values)\n invoice.compute_taxes()\n line.registration_id.invoice_line_id = invoice_line.id\n return invoice", "def action_invoice_create(self, cr, uid, ids, context=None):\n res = False\n\n journal_obj = self.pool.get('account.journal')\n inv_obj = self.pool.get('account.invoice')\n inv_line_obj = self.pool.get('account.invoice.line')\n fiscal_obj = self.pool.get('account.fiscal.position')\n property_obj = self.pool.get('ir.property')\n\n for order in self.browse(cr, uid, ids, context=context):\n pay_acc_id = order.partner_id.property_account_payable.id\n journal_ids = journal_obj.search(cr, uid, [('type', '=','purchase'),('company_id', '=', order.company_id.id)], limit=1)\n if not journal_ids:\n raise osv.except_osv(_('Error !'),\n _('There is no purchase journal defined for this 
company: \"%s\" (id:%d)') % (order.company_id.name, order.company_id.id))\n\n # generate invoice line correspond to PO line and link that to created invoice (inv_id) and PO line\n inv_lines = []\n for po_line in order.order_line:\n if po_line.product_id:\n acc_id = po_line.product_id.product_tmpl_id.property_account_expense.id\n if not acc_id:\n acc_id = po_line.product_id.categ_id.property_account_expense_categ.id\n if not acc_id:\n raise osv.except_osv(_('Error !'), _('There is no expense account defined for this product: \"%s\" (id:%d)') % (po_line.product_id.name, po_line.product_id.id,))\n else:\n acc_id = property_obj.get(cr, uid, 'property_account_expense_categ', 'product.category').id\n fpos = order.fiscal_position or False\n acc_id = fiscal_obj.map_account(cr, uid, fpos, acc_id)\n\n inv_line_data = self._prepare_inv_line(cr, uid, acc_id, po_line, context=context)\n inv_line_id = inv_line_obj.create(cr, uid, inv_line_data, context=context)\n inv_lines.append(inv_line_id)\n\n po_line.write({'invoiced':True, 'invoice_lines': [(4, inv_line_id)]}, context=context)\n\n # get invoice data and create invoice\n inv_data = {\n 'name': order.partner_ref or order.name,\n 'reference': order.partner_ref or order.name,\n 'account_id': pay_acc_id,\n 'type': 'in_invoice',\n 'partner_id': order.partner_id.id,\n 'currency_id': order.pricelist_id.currency_id.id,\n 'address_invoice_id': order.partner_address_id.id,\n 'address_contact_id': order.partner_address_id.id,\n 'journal_id': len(journal_ids) and journal_ids[0] or False,\n 'invoice_line': [(6, 0, inv_lines)], \n 'origin': order.name,\n 'fiscal_position': order.fiscal_position.id or order.partner_id.property_account_position.id,\n 'payment_term': order.partner_id.property_payment_term and order.partner_id.property_payment_term.id or False,\n 'company_id': order.company_id.id,\n 'add_disc': order.add_disc or 0.0\n }\n inv_id = inv_obj.create(cr, uid, inv_data, context=context)\n\n # compute the invoice\n inv_obj.button_compute(cr, uid, [inv_id], context=context, set_total=True)\n\n # Link this new invoice to related purchase order\n order.write({'invoice_ids': [(4, inv_id)]}, context=context)\n res = inv_id\n return res", "def action_move_create(self):\n\t\taccount_move = self.env['account.move']\n\n\t\tfor inv in self:\n\t\t\tif not inv.journal_id.sequence_id:\n\t\t\t\traise UserError(_('Please define sequence on the journal related to this invoice.'))\n\t\t\tif not inv.invoice_line_ids.filtered(lambda line: line.account_id):\n\t\t\t\traise UserError(_('Please add at least one invoice line.'))\n\t\t\tif inv.move_id:\n\t\t\t\tcontinue\n\n\n\t\t\tif not inv.date_invoice:\n\t\t\t\tinv.write({'date_invoice': fields.Date.context_today(self)})\n\t\t\tif not inv.date_due:\n\t\t\t\tinv.write({'date_due': inv.date_invoice})\n\t\t\tcompany_currency = inv.company_id.currency_id\n\n\t\t\t# create move lines (one per invoice line + eventual taxes and analytic lines)\n\t\t\timl = inv.invoice_line_move_line_get()\n\t\t\timl += inv.tax_line_move_line_get()\n\n\t\t\tdiff_currency = inv.currency_id != company_currency\n\t\t\t# create one move line for the total and possibly adjust the other lines amount\n\t\t\ttotal, total_currency, iml = inv.compute_invoice_totals(company_currency, iml)\n\n\t\t\tname = inv.name or ''\n\t\t\tif inv.payment_term_id:\n\t\t\t\ttotlines = inv.payment_term_id.with_context(currency_id=company_currency.id).compute(total, inv.date_invoice)[0]\n\t\t\t\tres_amount_currency = total_currency\n\t\t\t\tfor i, t in 
enumerate(totlines):\n\t\t\t\t\tif inv.currency_id != company_currency:\n\t\t\t\t\t\tamount_currency = company_currency._convert(t[1], inv.currency_id, inv.company_id, inv._get_currency_rate_date() or fields.Date.today())\n\t\t\t\t\telse:\n\t\t\t\t\t\tamount_currency = False\n\n\t\t\t\t\t# last line: add the diff\n\t\t\t\t\tres_amount_currency -= amount_currency or 0\n\t\t\t\t\tif i + 1 == len(totlines):\n\t\t\t\t\t\tamount_currency += res_amount_currency\n\n\t\t\t\t\t_logger.info(inv)\n\t\t\t\t\timl.append({\n\t\t\t\t\t\t'type': 'dest',\n\t\t\t\t\t\t'name': name,\n\t\t\t\t\t\t'price': t[1],\n\t\t\t\t\t\t'account_id': inv.account_id.id,\n\t\t\t\t\t\t'date_maturity': t[0],\n\t\t\t\t\t\t'amount_currency': diff_currency and amount_currency,\n\t\t\t\t\t\t'currency_id': diff_currency and inv.currency_id.id,\n\t\t\t\t\t\t'invoice_id': inv.id,\n\t\t\t\t\t\t#'partner_id': inv.partner_line_id.id\n\t\t\t\t\t})\n\t\t\telse:\n\t\t\t\t_logger.info(inv)\n\t\t\t\ttotal_taxes_to_pay = self.return_tax_to_payy()\n\n\t\t\t\tif inv.taxes_collected_id.type_taxes == 'tax_company':\n\t\t\t\t\timl.append({\n\t\t\t\t\t'type': 'dest',\n\t\t\t\t\t'name': name,\n\t\t\t\t\t'price': total_taxes_to_pay,\n\t\t\t\t\t'account_id': inv.taxes_collected_id.account_id.id,\n\t\t\t\t\t'date_maturity': inv.date_due,\n\t\t\t\t\t'amount_currency': diff_currency and total_currency,\n\t\t\t\t\t'currency_id': diff_currency and inv.currency_id.id,\n\t\t\t\t\t'invoice_id': inv.id,\n\t\t\t\t\t#'partner_id': inv.partner_line_id.id\n\t\t\t\t\t})\n\t\t\t\t\timl.append({\n\t\t\t\t\t'type': 'dest',\n\t\t\t\t\t'name': name,\n\t\t\t\t\t'price': total- total_taxes_to_pay,\n\t\t\t\t\t'account_id': inv.account_id.id,\n\t\t\t\t\t'date_maturity': inv.date_due,\n\t\t\t\t\t'amount_currency': diff_currency and total_currency,\n\t\t\t\t\t'currency_id': diff_currency and inv.currency_id.id,\n\t\t\t\t\t'invoice_id': inv.id,\n\t\t\t\t\t#'partner_id': inv.partner_line_id.id\n\t\t\t\t\t})\n\n\t\t\t\telse:\n\t\t\t\t\timl.append({\n\t\t\t\t\t'type': 'dest',\n\t\t\t\t\t'name': name,\n\t\t\t\t\t'price': total,\n\t\t\t\t\t'account_id': inv.account_id.id,\n\t\t\t\t\t'date_maturity': inv.date_due,\n\t\t\t\t\t'amount_currency': diff_currency and total_currency,\n\t\t\t\t\t'currency_id': diff_currency and inv.currency_id.id,\n\t\t\t\t\t'invoice_id': inv.id,\n\t\t\t\t\t#'partner_id': inv.partner_line_id.id\n\t\t\t\t})\n\n\t\t\tpart = self.env['res.partner']._find_accounting_partner(inv.partner_id)\n\n\t\t\t#validamo que sea una factura de proveedor\n\t\t\tif self.type == 'in_invoice':\n\t\t\t\tdata_new = []\n\t\t\t\tfor l in iml:\n\t\t\t\t\tif 'partner_id' in l:\n\t\t\t\t\t\tif l['partner_id']:\n\t\t\t\t\t\t\tdata_new.append((0, 0, self.line_get_convert(l, l['partner_id'])) )\n\t\t\t\t\telse:\n\t\t\t\t\t\tdata_new.append((0, 0, self.line_get_convert(l, part.id)) )\n\n\t\t\t\tline = [l for l in data_new ]\n\t\t\telse:\n\t\t\t\tline = [(0, 0, self.line_get_convert(l, part.id)) for l in iml ]\n\n\t\t\tline = inv.group_lines(iml, line)\n\n\t\t\tline = inv.finalize_invoice_move_lines(line)\n\n\t\t\tdate = inv.date or inv.date_invoice\n\t\t\tmove_vals = {\n\t\t\t\t'ref': inv.reference,\n\t\t\t\t'line_ids': line,\n\t\t\t\t'journal_id': inv.journal_id.id,\n\t\t\t\t'date': date,\n\t\t\t\t'narration': inv.comment,\n\t\t\t}\n\n\t\t\tmove = account_move.create(move_vals)\n\t\t\t# Pass invoice in method post: used if you want to get the same\n\t\t\t# account move reference when creating the same invoice after a cancelled one:\n\t\t\tmove.post(invoice = inv)\n\t\t\t# make the 
invoice point to that move\n\t\t\tvals = {\n\t\t\t\t'move_id': move.id,\n\t\t\t\t'date': date,\n\t\t\t\t'move_name': move.name,\n\t\t\t}\n\t\t\tinv.write(vals)\n\t\treturn True", "def compute_counterpart_lines(self):\n for item in self:\n move_debit_lines = []\n move_credit_lines = []\n\n # list of all the move lines of the payment's move\n line_list = []\n for entry in item.journal_entry_ids:\n for line in entry.line_ids:\n if line.account_id.treasury_planning:\n line_list.append(line)\n\n # for each line above collect all the reconciled counterpart lines\n for line in line_list:\n if line.credit > 0 and line.debit == 0:\n for match in line.matched_debit_ids:\n move_debit_lines.append(match.debit_move_id.id)\n\n if line.credit == 0 and line.debit > 0:\n for match in line.matched_credit_ids:\n move_credit_lines.append(match.credit_move_id.id)\n\n if move_credit_lines:\n counterpart_move_ids = move_credit_lines\n else:\n counterpart_move_ids = move_debit_lines\n\n # bank move share is transformed to dictionary\n bank_move_dict = (ast.literal_eval(item.cf_share) if\n item.cf_share else {})\n\n # the share of each counterpart line is \"merged or added\"\n # in a weighted manner to the bank line share\n for cpt in counterpart_move_ids:\n dest_move_line = self.env['account.move.line'].browse(cpt)\n weight = round(dest_move_line.balance / item.amount, 2)\n # counterpart share is transformed into dictionary\n move_line_dict = ast.literal_eval(dest_move_line.cf_share)\n\n # each key is finally added to the bank line share\n for key, value in move_line_dict.iteritems():\n draft_dictionary = dictop.sum_dictionary(\n bank_move_dict.get(key, {}), 1,\n move_line_dict.get(key, {}), weight)\n bank_move_dict[key] = dictop.check_dict_total(\n draft_dictionary, 1)\n\n # the dictionary is transformed into string and assigned\n item.cf_share = json.dumps(bank_move_dict)", "def compute_advice(self):\n for advice in self:\n old_lines = self.env['hr.payroll.advice.line'].search([('advice_id', '=', advice.id)])\n if old_lines:\n old_lines.unlink()\n payslips = self.env['hr.payslip'].search([('date_from', '<=', advice.date), ('date_to', '>=', advice.date), ('state', '=', 'done')])\n for slip in payslips:\n if not slip.sudo().employee_id.bank_account_id and not slip.sudo().employee_id.bank_account_id.acc_number:\n raise UserError(_('Please define bank account for the %s employee') % (slip.employee_id.name,))\n payslip_line = self.env['hr.payslip.line'].search([('slip_id', '=', slip.id), ('code', '=', 'NET')], limit=1)\n if payslip_line:\n self.env['hr.payroll.advice.line'].create({\n 'advice_id': advice.id,\n 'name': slip.sudo().employee_id.bank_account_id.acc_number,\n 'ifsc_code': slip.sudo().employee_id.bank_account_id.bank_bic or '',\n 'employee_id': slip.employee_id.id,\n 'bysal': payslip_line.total\n })\n slip.advice_id = advice.id" ]
[ "0.6526457", "0.60499185", "0.6039584", "0.5971947", "0.5949904", "0.5877273", "0.58699375", "0.5868027", "0.5864572", "0.57496387", "0.56165034", "0.5592037", "0.55525774", "0.5516271", "0.5495002", "0.5483541", "0.5412485", "0.5412102", "0.53373694", "0.53041404", "0.5290604", "0.52692235", "0.5256279", "0.5249647", "0.5236856", "0.5236116", "0.52341986", "0.52206016", "0.5200139", "0.51388955" ]
0.7914905
0
Return the standard path to the shared area on the current platform.
def shared_area_path() -> str:

    try:
        return os.environ["OITG_SHARED_AREA"]
    except KeyError:
        pass

    if os.name == "nt":  # Windows
        return "Z:\\"
    if os.name == "unix" or os.name == "posix":  # Linux / OSX / ...
        return os.path.expanduser("~/steaneShared/")
    raise Exception("Unknown OS")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_share_path():\n cwd = os.path.dirname(__file__)\n share = os.path.join(cwd, '../share')\n return os.path.abspath(share)", "def path_share(self) -> Path:\n return self.path_supervisor / SHARE_DATA", "def get_path(self):\n\t\treturn call_sdk_function('PrlShare_GetPath', self.handle)", "def path_extern_supervisor(self) -> PurePath:\n return PurePath(os.environ[ENV_SUPERVISOR_SHARE])", "def path_extern_mounts(self) -> PurePath:\n return self.path_extern_supervisor / MOUNTS_FOLDER", "def localPath(self):\n return self.home", "def _get_mount_path(self, connection_info):\n share = self._normalize_export(connection_info['data']['export'])\n return os.path.join(self._get_mount_point_base(),\n utils.get_hash_str(share))", "def path_extern_share(self) -> PurePath:\n return PurePath(self.path_extern_supervisor, SHARE_DATA)", "def location(self):\n\n p = os.path.abspath(__file__)\n pathSP = os.path.split(p)\n return pathSP", "def path_config(self):\n return HOMEASSISTANT_CONFIG.format(HASSIO_SHARE_INT)", "def _getSocketPath():\n return f'{CmdSsh._getSshDir()}/soos-%r@%h-%p'", "def _GetSystemPath():\n return encoding_util.GetEncodedValue(os.environ, \"PATH\")", "def path(cls):\n from os.path import sep, join, exists\n from os import environ\n return join(environ.get(\"SystemRoot\", join(\"C:\", sep, \"Windows\")), \"System32\", \"mpclaim.exe\")", "def get_box_pathway():\n import os\n import sys\n sys.dont_write_bytecode = True\n user_env = os.environ['USERPROFILE']\n os.chdir(user_env)\n directory_list = os.listdir(user_env)\n Box_boolean = 'Box' in directory_list\n Box_Sync_boolean = 'Box Sync' in directory_list\n if Box_boolean is False and Box_Sync_boolean is False:\n raise ValueError('Box or Box Sync is not in your pathway')\n elif Box_boolean is True and Box_Sync_boolean is True:\n raise ValueError('Program does not know whether to distinguish Box or Box Sync')\n else:\n if Box_boolean is True:\n return user_env + '\\Box\\\\'\n elif Box_Sync_boolean is True:\n return user_env + '\\Box Sync\\\\'", "def get_home_path(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetHomePath', self.handle)", "def getPath(self):\n path = os.path.dirname(os.path.realpath(__file__)) #Finds the path of the application\n path =(os.path.dirname(os.path.realpath(__file__))+ '\\\\Enigma Settings') #Adds to the directory to create a folder\n \n return path #Returns the folders directory", "def get_kernel_path():\n path = \"/\".join(os.path.dirname(os.path.realpath(__file__)).split('/')[:-1])\n return path+'/src/'", "def os_path(self, **kw):\n with_drive = kw.get(\"with_drive\", True)\n if os.name == \"nt\":\n return self.windows_path(with_drive=with_drive)\n return self.posix_path(with_drive=with_drive)", "def _get_os_path(self, name=None, path=''):\n\t\t\n\t\tif self.notebook_dir:\n\t\t\tout_path =os.path.join( self.notebook_dir, path.lstrip('/'))\n\t\telse:\n\t\t\tout_path = path\n\t\t\n\t\tif name:\n\t\t\tout_path = os.path.join(out_path, name.lstrip('/'))\n\t\t\n\t\treturn out_path", "def _get_local_share_path(self, share):\n local_vol_path = self._get_mount_point_for_gluster_vol()\n if not os.access(local_vol_path, os.R_OK):\n raise exception.GlusterfsException('share path %s does not exist' %\n local_vol_path)\n return os.path.join(local_vol_path, share['name'])", "def system_path(self, path):\n return os.path.join(self.prefix, path.lstrip('/'))", "def getPath(self):\n path = '/'.join(self.getPhysicalPath())\n return path", "def get_home():\n try:\n return str(Path.home())\n except Exception:\n return None", 
"def get_absolute_pathname(self):\n return os.path.join(settings.PRIVATE_STORAGE_ROOT, self.get_relative_pathname())", "def path(self):\r\n return self.chroot", "def path(self):\r\n return self.chroot", "def get_standard_directory(self):\n dirpath = (c.c_char * 256)()\n self._lib.StGetDirectory(c.c_int(1), c.byref(dirpath), 256)\n return dirpath.value.decode('ascii')", "def _get_mount_point_for_share(self, nfs_share):\n nfs_share = nfs_share.encode('utf-8')\n return os.path.join(self.configuration.nexenta_mount_point_base,\n md5(nfs_share, usedforsecurity=False).hexdigest())", "def GetPath(self):\n sdk_root = config.Paths().sdk_root\n if not sdk_root:\n raise NoSdkRootError()\n return os.path.join(sdk_root, self.name)", "def realPath(self):\n \n return (self.useLink and [self.linkPath] or [self.installPath])[0]" ]
[ "0.75354296", "0.6952207", "0.67871875", "0.67391086", "0.67256176", "0.6657467", "0.6635167", "0.661767", "0.66105354", "0.6436675", "0.6340287", "0.6331047", "0.63205075", "0.6297639", "0.62504154", "0.6217222", "0.6186836", "0.6157988", "0.61453235", "0.6119978", "0.61148596", "0.61036146", "0.60967356", "0.6091961", "0.60649556", "0.60649556", "0.60303104", "0.6011863", "0.6002475", "0.60024244" ]
0.85109854
0
Return the path to the given user's analysis directory on the shared area (``/Users//analysis``).
def analysis_root_path(user: Optional[str] = None) -> str:

    if user is None:
        user = _get_user()
    return os.path.join(shared_area_path(), "Users", user, "analysis")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def todays_analysis_path(day: Optional[str] = None, user: Optional[str] = None) -> str:\n if day is None:\n day = date.today().isoformat()\n if user is None:\n user = _get_user()\n path = os.path.join(analysis_root_path(user=user), day)\n\n if not os.access(path, os.R_OK):\n # If the dir does not exist, create it\n os.mkdir(path)\n\n return path", "def dir_user(assignment, user):\n return os.path.join(repository, assignment, user)", "def dir_results(assignment, user):\n return os.path.join(repository, assignment, user, 'results')", "def get_home_directory(self, user: str) -> str:\n process = self.run(\n \"/\",\n \"root\",\n [\"sh\", \"-c\", f\"realpath ~{user}\"],\n encoding=\"utf-8\",\n stdout=subprocess.PIPE,\n )\n return process.stdout.strip()", "def analysis_path(\n project: str, location: str, conversation: str, analysis: str,\n ) -> str:\n return \"projects/{project}/locations/{location}/conversations/{conversation}/analyses/{analysis}\".format(\n project=project,\n location=location,\n conversation=conversation,\n analysis=analysis,\n )", "def getFSUserDir(self):\n if not self.authorised:\n raise AuthError(401,\"I am sorry, but you are not authorised\")\n\n if self.authJson[\"userInfo\"] and self.authJson[\"userInfo\"][\"screenName\"]:\n fsDir = self.config.get(\"FileMan\",\"homedir\") + self.authJson[\"userInfo\"][\"screenName\"]\n return fsDir\n else: \n raise AuthError(500, \"Cannot determine the working directory - Liferay did not provide user's screenName\")", "def getUserDir() -> str:\n\n if os.name == \"nt\": # Windows system, try to return documents directory\n try:\n import ctypes.wintypes\n CSIDL_PERSONAL = 5 # Documents\n SHGFP_TYPE_CURRENT = 0 # Current value\n\n buf = ctypes.create_unicode_buffer(ctypes.wintypes.MAX_PATH)\n ctypes.windll.shell32.SHGetFolderPathW(0, CSIDL_PERSONAL, 0, SHGFP_TYPE_CURRENT, buf)\n\n return buf.value\n except ImportError:\n pass\n\n return os.path.expanduser(\"~\") # Non-Windows system, return home directory", "def getRelativeRootExperimentPath(self):\n return userId + \"/\" + \\\n self._rootExportPath[self._rootExportPath.rfind(self._properties['export_dir']):]", "def get_share_path():\n cwd = os.path.dirname(__file__)\n share = os.path.join(cwd, '../share')\n return os.path.abspath(share)", "def path_to_data():\n jab = os.expanduser('~/jab')\n return os.path.join(jab, 'local/login_sums.csv')", "def user(path = None):\n if path:\n return \"%s/%s\" % (expanduser(\"~\"), path)\n else:\n return expanduser(\"~\")", "def shared_area_path() -> str:\n\n try:\n return os.environ[\"OITG_SHARED_AREA\"]\n except KeyError:\n pass\n\n if os.name == \"nt\": # Windows\n return \"Z:\\\\\"\n if os.name == \"unix\" or os.name == \"posix\": # Linux / OSX / ...\n return os.path.expanduser(\"~/steaneShared/\")\n raise Exception(\"Unknown OS\")", "def get_user_data_path():\n current_directory = os.path.dirname(os.path.realpath(__file__))\n return os.path.join(current_directory, 'emergency_fund_info.json')", "def get_downloadpath(user_id):\r\n path = settings.DOCUMENT_PATH + str(user_id) + '/'\r\n if not os.path.isdir(path):\r\n os.mkdir(path)\r\n return path", "def getFSUserDir(self):\n\n return self.config.get(\"FileMan\",\"homedir\") + self.getRole()[\"roleName\"]", "def default_output_path():\n\n documents = os.path.join(os.path.expanduser('~'))\n try:\n documents = _xdg_documents_path()\n except: pass\n if platform.system() == 'Windows':\n try:\n documents = _win_documents_path()\n except: pass\n\n return os.path.join(documents, 'Topographica')", "def 
get_folder(self):\n return os.path.join(\n settings.PRIVATE_STORAGE_ROOT, Exam.EXAM_FILES_LOCATION,\n str(self.unique_id)[0:2])", "def get_teamocil_dir() -> pathlib.Path:\n return pathlib.Path(\"~/.teamocil/\").expanduser()", "def get_game_dir(self, username=False):\n if not self.common and not username:\n raise RuntimeError(\"Can't determine this game's directory without username\")\n if self.common:\n subdir = \"common\"\n else:\n subdir = \"username\"\n subsubdir = self.dir\n if WIN32 or CYGWIN:\n subsubdir = subsubdir.lower()\n return os.path.join(subdir, subsubdir)", "def share_directory(self):\n # Get the user to share file/folder with.\n share_user = User.query.filter_by(email = self.email.data).first()\n if not share_user:\n return\n\n # The source to copy to another user.\n filename = os.listdir(self.path.data)[int(self.index.data)]\n src = os.path.join(self.path.data, filename)\n # Get home path for the user to share folder with.\n dst = os.path.join(share_user.get_files_path(), filename)\n # Copy source to destination.\n copytree(src, dst)", "def get_base_data_directory(self):\n user_category = ''\n directory = ''\n\n if self.session_start_date:\n start_time = self.session_start_date.split(' ')[0] #.replace('-', '')\n else:\n # PL. To avoid mixing users directory if they restart the application\n # after midnight but before 8 AM, the directory date doesn't change:\n _local_time = time.localtime()\n if _local_time[3] > 7:\n start_time = time.strftime(\"%Y-%m-%d\")\n else:\n # substract 8 hourse to current date to get yesterday's date. \n _local_time = time.gmtime((time.time() - 8*60*60))\n start_time = time.strftime(\"%Y-%m-%d\", _local_time)\n\n if self.is_inhouse():\n #directory = os.path.join(self.base_directory, self.endstation_name,\n # self.get_user_category(), self.get_proposal(),\n # start_time)\n directory = os.path.join(self.base_directory, start_time, self.proposal_number, self.get_proposal_number())\t \n else:\n #directory = os.path.join(self.base_directory, self.get_user_category(),\n # self.get_proposal(), self.endstation_name,\n # start_time)\n logging.debug(\"SoleilSession self.base_directory %s\" % self.base_directory)\n logging.debug(\"SoleilSession start_time %s\" % start_time)\n logging.debug(\"SoleilSession self.proposal_number %s\" % self.proposal_number)\n logging.debug(\"SoleilSession self.get_proposal_number() %s\" % self.get_proposal_number())\n directory = os.path.join(self.base_directory, start_time, self.get_proposal_number())\n\n return directory", "def user_home_path(self):\n return path.join(env.user_home, self._user_home_path)", "def _get_storage_path(self, stream_name:str=None, version:int=None, user_id:str=None):\n\n dirpath = self.data_path+\"study=\"+self.study_name+\"/\"\n\n if stream_name:\n dirpath += \"stream={0}/\".format(stream_name)\n\n if version:\n if \"stream=\" not in dirpath:\n raise ValueError(\"stream_name argument is missing.\")\n else:\n dirpath += \"version={0}/\".format(str(version))\n\n if user_id:\n if \"stream=\" not in dirpath or \"version=\" not in dirpath:\n raise ValueError(\"stream_name and/or version arguments are missing.\")\n else:\n dirpath += \"user={0}/\".format(user_id)\n\n return dirpath", "def get_storage_directory(username=None):\n\n return os.path.join(get_home_dir(username), '.cfy-agent')", "def get_disassembler_user_directory(self):\n pass", "def get_home_dir(self, username):\n user = connection.User.find_one({'email': str(username) })\n return str(user['_id'])", "def get_path(self, path):\n if 
path.startswith('/') and not path.startswith('~/'):\n return os.getcwd() + '/' + path\n else:\n return path", "def home_directory(self):\n out = self._call(\"GETHOMEDIRECTORY\")\n return out.json()[\"Path\"]", "def getRootDirectory(self):\n if Globals.WORKFLOWS_BASEDIR[0] == '~':\n return os.path.expanduser(Globals.WORKFLOWS_BASEDIR)\n else:\n return os.path.join('', Globals.WORKFLOWS_BASEDIR)", "def get_data_dir():\n return Path(current_app.config[\"USER_DIR\"]) / \"data\"" ]
[ "0.6710544", "0.66245717", "0.64282644", "0.61189187", "0.5994886", "0.5977346", "0.5951611", "0.59258217", "0.5919063", "0.5871089", "0.5855553", "0.5822797", "0.5740604", "0.5706992", "0.56850857", "0.5682241", "0.5660873", "0.5612151", "0.5608156", "0.5605027", "0.560351", "0.5599671", "0.55976796", "0.5572536", "0.5572053", "0.55658996", "0.5558749", "0.5538831", "0.5538418", "0.55361325" ]
0.84479433
0
Return the path to the analysis directory for the given day, defaulting to today. The analysis directory is intended to be used as working space for analysing data while it is taken, so that the code can easily be found again later if the data or conclusions reached are reexamined. If the directory does not exist, it is created.
def todays_analysis_path(day: Optional[str] = None, user: Optional[str] = None) -> str: if day is None: day = date.today().isoformat() if user is None: user = _get_user() path = os.path.join(analysis_root_path(user=user), day) if not os.access(path, os.R_OK): # If the dir does not exist, create it os.mkdir(path) return path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_day_data_path(self, days_ago=0):\n home = os.environ.get('USERPROFILE').replace('\\\\', '/')\n self.data_dir= os.path.join(home, 'TimeData')\n if not os.path.isdir(self.data_dir):\n mkdir(self.data_dir)\n today_filename = os.path.join(\n self.data_dir,\n (datetime.now()-timedelta(days=days_ago)).strftime('%Y-%m-%d.json'))\n return today_filename", "def filepath(day, ind):\n if ind!=\"TradeReport\" and ind!=\"OrderDetail\" and ind!=\"OrderHistory\":\n raise NameError(' ind must be either TradeReport or OrderDetail')\n \n elif day<1 or day>31 or type(day)!=int:\n raise TypeError('day must be an integer between 1 and 31')\n \n if day<10:\n day=\"0\"+str(day)\n else:\n day=str(day)\n \n path=\"/data/LSE_DATA/raw/T_\" + ind + \"_\"+ day +\"012008.csv/\" + \"t_\" + ind +\".csv\"\n\n return path", "def _get_new_measurement_path() -> pathlib.Path:\n today = strftime(\"%Y%m%d\")\n today_path = DATA_DIR / today\n new_path = get_unique_path(today_path, 'measurement_{:03d}')\n return new_path", "def journal_today():\n today = pendulum.today()\n LOG.debug('Today: %s', today)\n\n path = PATH_FORMAT.format(year=today.year, month=today.month, date=today.to_date_string())\n path = pathlib.Path(path).expanduser()\n LOG.debug('Path of today: %s', path)\n\n parent = path.parent\n if not parent.exists():\n parent.mkdir(parents=True, exist_ok=True)\n LOG.debug('Created new path: %s', parent)\n\n if not path.exists():\n shutil.copyfile(TEMPLATE_PATH, path)\n\n return path", "def prepare_folder(self) -> str:\n base_folder = self.config['info']['folder']\n today_folder = f'{datetime.today():%Y-%m-%d}'\n folder = os.path.join(base_folder, today_folder)\n if not os.path.isdir(folder):\n os.makedirs(folder)\n return folder", "def _set_output_dir(self):\n return os.path.join(self.outputDir,\n datetime.datetime.utcnow().strftime(\"%Y%m%d\"))", "def _get_directory(self):\n directory = os.environ.get(\"EEMETER_WEATHER_CACHE_DIRECTORY\",\n os.path.expanduser('~/.eemeter/cache'))\n if not os.path.exists(directory):\n os.makedirs(directory)\n return directory", "def day_name():\n file_path = os.path.dirname(__file__)\n day_path = os.path.normpath(os.path.join(file_path, '..'))\n return os.path.basename(day_path)", "def create_output_folder(self):\n if not os.path.exists(self.current_path):\n os.mkdir(self.current_path)\n data_dir_by_date = datetime.datetime.now().strftime(\n \"data-%d-%b_%H-%M-%S\")\n self.date_path = os.path.join(self.current_path, data_dir_by_date)\n if not os.path.exists(self.date_path):\n os.mkdir(self.date_path)", "def calc_directory(init_dir, dbg=False):\n dt_str, _ = calc_date_time()\n dt_final = os.sep.join([init_dir, dt_str])\n\n dbc.print_helper((\"Dir: \" + dt_final), dbg=dbg)\n return dt_final, dt_str", "def get_dag_directory(self) -> str:\n if isinstance(self._dag_directory, Path):\n return str(self._dag_directory.resolve())\n else:\n return str(self._dag_directory)", "def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)", "def _get_default_path(self):\n return os.path.join(cfg.DATA_DIR, 'VOCdevkit' + self._year)", "def tmpdir(self):\n dir_ = os.path.dirname(self.filename)\n try:\n path = At_code_checker.Dir_Map[dir_.lower()]\n if not os.path.isdir(path):\n create_dir(path)\n except KeyError:\n path = self.get_temp_dir()\n At_code_checker.Dir_Map[dir_.lower()] = path\n finally:\n return path", "def _get_new_log_file(self, date, hour):\n\n # Get folder name for this particular date\n dayfolder = str(date)\n\n # Generate new log directory if 
necessary\n if not os.path.exists('/'.join([self._logpath, dayfolder])):\n self._make_log_dir(dayfolder)\n\n # Return the path to the current log file\n filename = str(hour) + '.txt'\n return '/'.join([self._logpath, dayfolder, filename])", "def get_default_data_dir(self):\n data_dir_path = os.path.join(self.comicsite.short_name,self.folder_prefix,self.cleantitle)\n return data_dir_path", "def _join_path(\n year: int, day: int, session: str, file_type: Optional[str] = None\n) -> str:\n cache_location = user_cache_dir(appname=\"advent-of-code\")\n cache_file = os.path.join(cache_location, str(session), str(year), str(day))\n if file_type == \"input_file\":\n cache_file = os.path.join(cache_file, \"input.txt\")\n if file_type == \"submission_file\":\n cache_file = os.path.join(cache_file, \"submission.txt\")\n if file_type == \"last_time_file\":\n cache_file = os.path.join(cache_file, \"time.txt\")\n return cache_file", "def _get_session_dir(self):\n\n fnd = os.path.join(self.config.capture_dir, self.timestamp.date_string(), self.timestamp.time_string())\n if not os.path.isdir(fnd):\n os.makedirs(fnd)\n\n return fnd", "def submission_dir(self):\n submissions_dir = osp.join(self.root, \"submissions\")\n date = '-'.join([\n f'{getattr(datetime.now(), x)}'\n for x in ['year', 'month', 'day']])\n time = '-'.join([\n f'{getattr(datetime.now(), x)}'\n for x in ['hour', 'minute', 'second']])\n submission_name = f'{date}_{time}'\n path = osp.join(submissions_dir, submission_name)\n return path", "def experiment_dir(experiment_name: str) -> Path: # pragma: no cover\n return EXPERIMENTS_DIR / experiment_name", "def working_directory(self):\n project_full_path = \"/\".join(self.file_name.split(\"/\")[:-1])\n file_name = self.file_name.split(\"/\")[-1]\n if \".h5\" in file_name:\n file_name = file_name.split(\".h5\")[0]\n file_name += \"_hdf5\"\n if self.h5_path[0] == \"/\":\n h5_path = self.h5_path[1:]\n else:\n h5_path = self.h5_path\n return posixpath.join(project_full_path, file_name, h5_path)", "def get_run_directory(output_root: Union[str, Path]) -> Path:\n output_root = Path(output_root).resolve()\n launch_time = datetime.datetime.now().strftime(\"%Y_%m_%d\")\n today_runs = [\n int(run_dir.name.split(\".\")[1])\n for run_dir in output_root.iterdir()\n if run_dir.name.startswith(launch_time)\n ]\n run_version = max(today_runs) + 1 if today_runs else 1\n datetime_dir = output_root / f\"{launch_time}.{run_version:0>2}\"\n return datetime_dir", "def init_logs_directory(self):\n \n return self.join_and_init_path(self.get_data_general_directory, PATH_FOR_LOGS)", "def _default_log_dir():\n config_dir = os.path.abspath(os.path.dirname(self.config_filepath))\n log_dir = os.path.join(config_dir, \"logs\")\n if not os.path.isdir(log_dir):\n os.mkdir(log_dir)\n return log_dir", "def get_current_day() -> int:\n return datetime.now().day", "def get_first_of_day(self, folder_before=None, day=datetime.today(), filename='Epikurve.csv'):\n folders = os.listdir(self.data_root_path)\n folders.sort(reverse=True)\n reached = folder_before is not None\n __folder_before = str(folder_before).split('/')[-1]\n for folder in folders:\n if reached:\n path_csv = self.data_root_path / folder / filename\n with open(path_csv) as f:\n first = True\n for x in csv.reader(f, delimiter=';'):\n if first:\n first = False\n continue\n ts = datetime.strptime(x[2], '%Y-%m-%dT%H:%M:%S')\n break\n if ts.date() <= day.date():\n return folder\n else:\n if folder == __folder_before:\n reached = True", "def 
make_experiment_directory(path='',config=None,default_dir='_runs'):\n directory = path\n if not path:\n timestamp = datetime.now().strftime('%Y-%m-%dT%H-%M-%S-%f')\n directory = os.path.join(default_dir,timestamp)\n directory = os.path.abspath(directory) \n if os.path.isdir(directory) and not config.override and not config.cloud:\n raise ValueError(\n 'directory already exists, use --override option: %s'\n % directory)\n elif os.path.isdir(directory) and not config.cloud: \n rmtree(directory)\n if not config.cloud: \n os.makedirs(directory)\n if config:\n config.wdir = directory \n return directory", "def get_denoiser_data_dir():\r\n dir = get_qiime_project_dir() + \"/qiime/support_files/denoiser/Data/\"\r\n return dir", "def simulation_dir(self):\n try:\n return (self.output_directory / self.sim_id).expand()\n except AttributeError:\n return Path()", "def get_counts_dir(cls, event_type):\n if 'rna_events_dir' in cls.global_settings:\n return os.path.join(cls.global_settings['rna_events_dir'],\n event_type)\n return None" ]
[ "0.62076753", "0.5934615", "0.59194785", "0.58207476", "0.57378083", "0.5428975", "0.54136276", "0.53955543", "0.5333741", "0.5282543", "0.5224942", "0.519696", "0.519696", "0.5180282", "0.5148305", "0.51434815", "0.5098279", "0.5076191", "0.5064754", "0.50544584", "0.5054282", "0.5029764", "0.5018956", "0.5011002", "0.5006264", "0.50057095", "0.4997772", "0.4975446", "0.49469227", "0.4939346" ]
0.7822021
0
Return the path to an experiment's ARTIQ results directory. The standard results path is ``/artiqResults/``.
def artiq_results_path(experiment: Optional[str] = None) -> str: path = os.path.join(shared_area_path(), "artiqResults") if experiment is None: try: experiment = os.environ["OITG_EXPERIMENT"] except KeyError: raise Exception( "No experiment supplied, and no OITG_EXPERIMENT environment key") return os.path.join(path, experiment)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def data_abex_results_dir(experiment_name: str) -> Path: # pragma: no cover\n return experiment_dir(experiment_name) / \"Results\"", "def data_abex_results_iteration_dir(experiment_name: str, iteration: int) -> Path: # pragma: no cover\n return data_abex_results_dir(experiment_name) / iteration_name(iteration)", "def _get_results_path(self):\n # if we already have the results path set, please return it\n if self._results_path is not None:\n return self._results_path\n\n self._validate_results_path()\n\n path = self.results_path\n\n if path is None:\n for i in range(1, 10001):\n name = f\"AutoML_{i}\"\n if not os.path.exists(name):\n self.create_dir(name)\n self._results_path = name\n return name\n # If it got here, could not create, raise expection\n raise AutoMLException(\"Cannot create directory for AutoML results\")\n elif os.path.exists(self.results_path) and os.path.exists(\n os.path.join(self.results_path, \"params.json\")\n ): # AutoML already loaded, return path\n self._results_path = path\n return path\n # Dir does not exist, create it\n elif not os.path.exists(path):\n self.create_dir(path)\n self._results_path = path\n return path\n # Dir exists and is empty, use it\n elif os.path.exists(path) and not len(os.listdir(path)):\n self._results_path = path\n return path\n elif os.path.exists(path) and len(os.listdir(path)):\n raise AutoMLException(\n f\"Cannot set directory for AutoML. Directory '{path}' is not empty.\"\n )\n\n raise AutoMLException(\"Cannot set directory for AutoML results\")", "def dir_results(assignment, user):\n return os.path.join(repository, assignment, user, 'results')", "def experiment_dir(experiment_name: str) -> Path: # pragma: no cover\n return EXPERIMENTS_DIR / experiment_name", "def ml_predict_results_path(self) -> str:\n return join(self.machine_learning_path, 'results')", "def _get_R_script_dir(self):\r\n qiime_dir = get_qiime_project_dir()\r\n script_dir = join(qiime_dir, 'qiime', 'support_files', 'R')\r\n return script_dir", "def _get_R_script_dir(self):\r\n qiime_dir = get_qiime_project_dir()\r\n script_dir = join(qiime_dir, 'qiime', 'support_files', 'R')\r\n return script_dir", "def _get_R_script_dir(self):\r\n qiime_dir = get_qiime_project_dir()\r\n script_dir = join(qiime_dir, 'qiime', 'support_files', 'R')\r\n return script_dir", "def results_dir(filename = None):\n path = 'results'\n if os.path.isdir(path):\n if not os.access(path, os.R_OK | os.W_OK):\n raise EnvironmentError(\"{0} is not readable or writable\".format(os.path.abspath(path)))\n return os.path.join(path, filename) if filename else path\n os.mkdir(path) # raises if it fails\n return os.path.join(path, filename) if filename else path", "def local_results(self):\n\n return self._local_results_path", "def get_result_path(self):\n return logPath", "def results_directory(self, run_config):\n suite_dir_name = '{}_{}'.format(run_config['test_suite_start_time'],\n run_config['test_id'])\n datetime_str = datetime.datetime.now().strftime('%Y%m%dT%H%M%S')\n test_result_dir = '{}'.format(datetime_str)\n result_dir = os.path.join(self.workspace, 'results', suite_dir_name,\n test_result_dir)\n\n # Creates workspace and default log folder\n if not os.path.exists(result_dir):\n print('Making results directory:{}'.format(result_dir))\n os.makedirs(result_dir)\n\n return result_dir", "def get_qiime_temp_dir():\r\n qiime_config = load_qiime_config()\r\n qiime_config_value = qiime_config['temp_dir']\r\n if qiime_config_value is not None:\r\n result = qiime_config_value\r\n else:\r\n result = 
'/tmp/'\r\n return result", "def getRelativeRootExperimentPath(self):\n return userId + \"/\" + \\\n self._rootExportPath[self._rootExportPath.rfind(self._properties['export_dir']):]", "def output_path(self) -> str:\n if self._output_path is None:\n if not self._root_folder:\n self._root_folder = self._env.experiments_folder\n folder = os.path.join(self._root_folder, self.key)\n\n if not os.path.exists(folder):\n os.makedirs(folder)\n\n self._output_path = folder\n\n return self._output_path", "def results(results_dir):\n\n if not isinstance(results_dir, str):\n raise TypeError(\"results_dir must be a string!\")\n\n if not os.path.exists(results_dir):\n os.makedirs(results_dir)\n\n return results_dir", "def get_trial_dir() -> str:\n return logging.root._log_dir # type: ignore", "def index_path(self):\n\t\treturn os.path.normpath(self.output + \"/\" + self.resultset_index)", "def remote_results(self):\n\n return self._remote_results_path", "def config_data_path(experiment_name: str) -> Path: # pragma: no cover\n return experiment_dir(experiment_name) / DATA_CONFIG", "def get_archive_file_path(self,results):\n path = os.path.join(self.archive_path,results.version)\n if not os.path.exists(path):\n os.makedirs(path)\n return os.path.join(path,self.get_archive_filename(results))", "def get_log_dir():\n base_dir = os.path.realpath(cfg.CONF.ruiner.log_dir.rstrip('/'))\n return os.path.join(base_dir, test_start_time_tag())", "def get_qiime_scripts_dir():\r\n script_fp = which('print_qiime_config.py')\r\n\r\n if script_fp is None:\r\n raise ScriptsDirError(\"Could not find the directory containing QIIME \"\r\n \"scripts. QIIME scripts must be accessible via \"\r\n \"the PATH environment variable, and they must \"\r\n \"be executable. Please ensure that you have a \"\r\n \"valid QIIME installation (see the QIIME \"\r\n \"Installation Guide: \"\r\n \"http://qiime.org/install/install.html).\")\r\n\r\n return dirname(script_fp)", "def config_abex_path(experiment_name: str) -> Path: # pragma: no cover\n return experiment_dir(experiment_name) / ABEX_CONFIG", "def test_get_result_directory(self):\n pass", "def get_output_path(self):\n output_path = '%s/%s' % (\n os.path.expanduser(JOB_OUTPUT_PATH), self.get_unique_name())\n return output_path", "def _fetch_results_dir(self, backend=None, results_dir=None):\n if backend is None and results_dir is not None:\n return results_dir\n elif backend is not None and results_dir is None:\n return Config().get_results_dir(backend)\n elif backend is None and results_dir is None:\n return self.nominal_results_dir\n else:\n raise ResultsAttributeError('Invalid combination of attributes!')", "def _getResultsFileName(self, toilPath):\n return os.path.join(toilPath, \"results.txt\")", "def output_path(self):\r\n return '%s/%s' % (os.path.abspath(os.path.dirname(__file__) + 'outputs'),\r\n self.identifier)" ]
[ "0.80512667", "0.7191955", "0.68996114", "0.6845597", "0.68339694", "0.66065645", "0.65667313", "0.65667313", "0.65667313", "0.65351224", "0.63939637", "0.6320295", "0.61587936", "0.6123075", "0.60877836", "0.60146594", "0.5972927", "0.5961885", "0.5882054", "0.58809346", "0.5845682", "0.5831248", "0.5815053", "0.57882553", "0.57843715", "0.574206", "0.5716158", "0.5699615", "0.5683362", "0.5673101" ]
0.873001
0
estimate an MxF user factor matrix and an FxN item factor matrix from the MxN rating matrix
def factor_mat(all_dat, f_num, iterations, regularization): # get # of users and # of items [u_num, i_num] = all_dat.shape # init user factors and item factors with random values u_fac = np.matrix(np.random.rand(u_num, f_num)) # MxF i_fac = np.matrix(np.random.rand(i_num, f_num)) # NxF # calculate the preference matrix preference = cal_preference(all_dat) # calculate the confidence matrix confidence = cal_confidence(all_dat) # recalculate the user factors and item factors using the alternating least square method for itr in range(iterations): u_fac = alternate_ls(u_num, i_fac, preference, confidence, regularization) #print itr, "u_fac" i_fac = alternate_ls(i_num, u_fac, preference.T, confidence.T, regularization) #print itr, "i_fac" # save the output df = pd.DataFrame(u_fac) df.to_csv("tmp/u_fac.tmp", index=False, header=False, sep='\t', encoding='utf-8') df = pd.DataFrame(i_fac.T) df.to_csv("tmp/i_fac.tmp", index=False, header=False, sep='\t', encoding='utf-8') # an MxF user factor matrix and an FxN item factor matrix return [u_fac, i_fac.T]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_matrix(self):\n\n self.matrix = np.zeros((len(self.users), len(self.items)))\n\n for user in self.train_set['users']:\n for item in self.train_set['feedback'][user]:\n self.matrix[self.user_to_user_id[user]][self.item_to_item_id[item]] = \\\n self.train_set['feedback'][user][item]", "def get_user_item_matrix(datafile, user_index, product_index):\n num_users = len(user_index)\n num_items = len(product_index)\n result = np.zeros((num_users, num_items))\n num_reviews = len(datafile)\n result_dense = np.zeros((num_reviews, 3))\n for line in datafile.iterrows():\n i = line[0]\n user_id = datafile['user_id'][i]\n product_id = datafile['business_id'][i]\n user = user_index[user_id]\n product = product_index[product_id]\n rating = datafile['stars'][i]\n result[user, product] = rating\n result_dense[i, 0] = user\n result_dense[i, 1] = product\n result_dense[i, 2] = rating\n return result, result_dense", "def create_user_item_matrix(data,type='unary'): \n if type == 'unary':\n\n # for unary rating drop duplicates\n data = data.drop_duplicates()\n\n # create sparse matrix\n matrix = csr_matrix((data['rating'], (data['user_id'],data['product_id'])))\n\n # rows and cols with empty values will be dropped (doesnt make any difference in size for sparse matrix, but if later converted to dense, it saves space)\n # get all non empty rows and cols\n rows, cols = matrix.nonzero()\n unique_rows = np.unique(rows)\n unique_cols = np.unique(cols)\n\n # select only rows and cols with values\n matrix = matrix[unique_rows]\n matrix = matrix[:,unique_cols]\n\n return matrix\n\n if type == 'count':\n\n # create sparse matrix with counted ratings\n matrix = csr_matrix((data['rating'], (data['user_id'],data['product_id'])))\n\n # rows and cols with empty values will be dropped (doesnt make any difference in size for sparse matrix, but if later converted to dense, it saves space)\n # get all non empty rows and cols\n rows, cols = matrix.nonzero()\n unique_rows = np.unique(rows)\n unique_cols = np.unique(cols)\n\n # select only rows and cols with values\n matrix = matrix[unique_rows]\n matrix = matrix[:,unique_cols]\n\n '''\n Im Gegensatz zum Memory Based approach, muss beim Model Based Approach noch das Rating angepasst werden. \n Das heisst, dass Produkte die viel eingekauft wurden ein höhers Rating erhalten und solche die weniger \n eingekauft wudren ein tieferes. Gleichzeitig muss das Maximum limitiert werden. Dies wird mittels \n dem max_count integer in der Funktion bewerkstelligt\n '''\n\n # create diagonal Matrix with 1 divided by maximum values per row. 
This needs to be done because there is no divide function for csr matrices implemented\n # source: https://stackoverflow.com/questions/42225269/scipy-sparse-matrix-division\n diag = diags(1/matrix.max(axis=1).A.ravel())\n\n # multipy the matrix with the maximum values to get range from 0-1\n matrix = diag.dot(matrix)\n\n # sort indices; not really needed, just cosmetics\n matrix.sort_indices()\n\n return matrix", "def example():\n num_user, num_item, ratings = build_ticket()\n \n # suffle_data\n np.random.shuffle(ratings)\n\n # split data to training & validation\n train_pct = 0.9\n train_size = int(train_pct * len(ratings))\n train = ratings[:train_size]\n validation = ratings[train_size:]\n\n # params\n num_feature = 5\n bmf_model = BayesianMatrixFactorization(\n num_user, num_item, num_feature, train, validation, max_rating=1, min_rating=0, ratingsMatirx=ratings)\n\n start_time = time.clock()\n bmf_model.estimate(10)\n end_time = time.clock()\n \n mat = np.matrix(bmf_model.item_features)\n with open('../data/ticket/item_feature', 'w') as f:\n for line in mat:\n np.savetxt(f, line, fmt='%.5f')\n\n print \"time spend = %.3f\" % (end_time - start_time)\n\n return bmf_model", "def get_user_feature_matrix_p(user_dict, user_index, aspect_index, N, popularity, A_dense, Polarity):\n result = np.zeros((len(user_index), len(aspect_index)))\n for key in user_dict.keys():\n index_user = user_index[key]\n user_reviews = user_dict[key]\n count_dict = {}\n max = 0\n min = 1000\n for review in user_reviews:\n feature = review[0]\n if feature not in aspect_index:\n continue\n aspect = aspect_index[feature]\n if aspect not in count_dict:\n count_dict[aspect] = 0;\n if Polarity == False:\n count_dict[aspect] += 1\n else:\n count_dict[aspect] += review[1]\n for aspect in count_dict.keys():\n count = count_dict[aspect]\n if count > max:\n max = count\n if count < min:\n min = count\n for aspect in count_dict.keys():\n count = count_dict[aspect]\n result[index_user, aspect] = (((count - min)/(max - min))*5)\n\n if len(popularity) > 0:\n col = np.zeros((len(result), 1))\n result = np.append(result, col, axis=1)\n for i in range(len(result)):\n items = A_dense[A_dense[:, 0] == i][:, 1]\n items = items.astype(int)\n result[i, len(result[1]) - 1] = np.mean(popularity[items, 1])\n return result", "def fit(self, users, items, ratings, test_users=[], test_items=[], test_ratings=[], **kargs):\n global_mean_bias_init = np.float32(np.mean(ratings))\n global_mean_bias_init = 0.01\n self.model = DeepFM_(**self.dfm_params, global_mean_bias_init=global_mean_bias_init, first_half_fit_only_fm=self.first_half_fit_only_fm)\n \n # もし、CTR予測の場合は、y=0のデータをランダム生成する。\n if self.ctr_prediction:\n users = list(users) + list(np.random.choice(list(set(users)), size=len(users)))\n items = list(items) + list(np.random.choice(list(set(items)), size=len(items)))\n ratings = list((np.array(ratings)>0).astype(int)) + [0]*len(ratings)\n test_ratings = list((np.array(test_ratings)>0).astype(int))\n \n Xi, Xv = self.data_manager.transform_users_and_items_to_Xi_Xv(users, items)\n \n if len(test_users)>0:\n test_Xi, test_Xv = self.data_manager.transform_users_and_items_to_Xi_Xv(test_users, test_items)\n self.model.fit(Xi, Xv, ratings, test_Xi, test_Xv, test_ratings, early_stopping=True)\n else:\n self.model.fit(Xi, Xv, ratings, early_stopping=True, **kargs)\n \n # load data\n self.trained_users = list(set(users))\n self.trained_items = list(set(items))\n self.global_mean = self.model.predict(Xi, Xv).mean()", "def calculate_recommendations(self, 
vote_list, itemMatch, itemIgnored):\n #print \"--------------------------------------------------\"\n #print \"calculate_recommendations\"\n #print \"--------------------------------------------------\"\n\n # http://www.quuxlabs.com/blog/2010/09/matrix-factorization-a-simple-tutorial-and-implementation-in-python/\n\n # U = np.array('users')\n # D = np.array('video_games')\n\n # R = |U| cross |D|\n\n # We want to discover K latent features\n\n # Find\n # P(a | |U| corss K matrix)\n # Q(a | |D| cross K matrix)\n # Such that their product approximates R\n # R approx= P cross transpose(Q) = hat(R)\n #\n\n # r[i][j] = transpose(p)[i] * q[j]\n # = sum( 1..k, p[i][k] * q[k][j] )\n\n # e[i][j]**2 = (r[i][j] - hat(r)[i][j])**2\n # = (r[i][j] - sum( 1..K, p[i][k] * q[k][j]))**2\n # squared error, estimated rating can be either higher or lower than the real thing\n\n # find the gradient\n # diff(e[i][j]**2, p[i][k]) = -2*(r[i][j] - hat(r)[i][j]) * (q[k][j]) = -2*e[i][j] * q[k][j]\n # diff(e[i][j]**2, q[k][j]) = -2*(r[i][j] - hat(r)[i][j]) * (p[i][k]) = -2*e[i][j] * p[i][k]\n\n # update rules\n # alpha = settings.alpha # learning_rate\n # alpha = 0.0002 # learning_rate\n # p[i][k]' = p[i][k] + alpha * diff(e[i][j]**2, p[i][k])\n # = p[i][k] + 2 * alpha * e[i][j] * q[k][j]\n # q[k][j]' = q[k][j] + alpha * diff(e[i][j]**2, q[k][j])\n # = q[k][j] + 2 * alpha * e[i][j] * p[i][k]\n\n # training data\n # T = (u[i], d[j], r[i][j])\n # np.array()\n\n # iterate until convergance\n # E = sum((u[i], d[j], r[i][j]) in T, e[i][j])\n # = sum((u[i], d[j], r[i][j]) in T, r[i][j]\n # - sum(1..k, p[i][k]*q[k][j]))**2\n\n # regularization\n # beta = 0.02\n # e[i][j]**2 = (r[i][j] - sum(1..K, p[i][j]*q[k][j]))**2\n # + ((beta/2) * sum(1..K, norm(P)**2 + norm(Q)**2))\n #\n # p[i][k]' = p[i][k] + alpha * (2 * e[i][j] * q[k][j] - beta * p[i][k])\n # q[k][j]' = q[k][j] + alpha * (2 * e[i][j] * p[i][k] - beta * q[k][j])\n\n data = np.array(vote_list)\n\n encoder = OneHotEncoder()\n\n users = data[:,0]\n unique_users = list(set(users))\n for i in range(len(users)):\n users[i] = unique_users.index(users[i])\n\n video_games = data[:,1]\n unique_games = list(set(video_games))\n for i in range(len(video_games)):\n video_games[i] = unique_games.index(video_games[i])\n\n ratings = data[:,2]\n M = len(set(video_games))\n N = len(set(users))\n R = np.zeros((N,M))\n for i in range(len(users)):\n user = users[i]\n game = video_games[i]\n rating = ratings[i]\n R[user][game] = rating\n\n K = 2\n\n P = np.random.rand(N,K)\n Q = np.random.rand(M,K)\n\n nP, nQ = self.matrix_factorization(R, P, Q, K)\n nR = np.dot(nP, nQ.T)\n\n itemMatch = {}\n for i in range(N):\n user = unique_users[i]\n itemMatch[user] = []\n for j in range(M):\n if R[i][j] == 0:\n video_game = unique_games[j]\n recommendation = (video_game, nR[i][j])\n itemMatch[user].append(recommendation)\n itemMatch[None] = []\n print 'pmf recommendations', itemMatch.items()\n print '\\n'\n recommendations = itemMatch.items()\n\n # returns\n # [\n # (<user1>, [\n # (\"<object_identifier1>\", <score>),\n # (\"<object_identifier2>\", <score>),\n # ]),\n # (<user2>, [\n # (\"<object_identifier1>\", <score>),\n # (\"<object_identifier2>\", <score>),\n # ]),\n # ]\n\n return recommendations", "def init_matrix(self, train, num_features):\n user_matrix = np.random.rand(self.num_users, num_features)\n item_matrix = np.random.rand(num_features, self.num_items)\n item_nnz = train.getnnz(axis=0)\n item_sum = train.sum(axis=0)\n item_matrix[0, :] = item_sum / item_nnz\n return user_matrix, 
item_matrix", "def create_adjust_matrix(self, is_sub: bool):\n matrix = None\n if not is_sub:\n ratings = np.ones_like(self._user, dtype=np.float32)\n matrix = sp.csr_matrix(\n (ratings, (self._user, self._item + self.n_users)),\n shape=(self.n_users + self.n_items, self.n_users + self.n_items),\n )\n else:\n if self.type == \"ND\":\n drop_user = self.rand_sample(\n self.n_users,\n size=int(self.n_users * self.drop_ratio),\n replace=False,\n )\n drop_item = self.rand_sample(\n self.n_items,\n size=int(self.n_items * self.drop_ratio),\n replace=False,\n )\n R_user = np.ones(self.n_users, dtype=np.float32)\n R_user[drop_user] = 0.0\n R_item = np.ones(self.n_items, dtype=np.float32)\n R_item[drop_item] = 0.0\n R_user = sp.diags(R_user)\n R_item = sp.diags(R_item)\n R_G = sp.csr_matrix(\n (\n np.ones_like(self._user, dtype=np.float32),\n (self._user, self._item),\n ),\n shape=(self.n_users, self.n_items),\n )\n res = R_user.dot(R_G)\n res = res.dot(R_item)\n\n user, item = res.nonzero()\n ratings = res.data\n matrix = sp.csr_matrix(\n (ratings, (user, item + self.n_users)),\n shape=(self.n_users + self.n_items, self.n_users + self.n_items),\n )\n\n elif self.type == \"ED\" or self.type == \"RW\":\n keep_item = self.rand_sample(\n len(self._user),\n size=int(len(self._user) * (1 - self.drop_ratio)),\n replace=False,\n )\n user = self._user[keep_item]\n item = self._item[keep_item]\n\n matrix = sp.csr_matrix(\n (np.ones_like(user), (user, item + self.n_users)),\n shape=(self.n_users + self.n_items, self.n_users + self.n_items),\n )\n\n matrix = matrix + matrix.T\n D = np.array(matrix.sum(axis=1)) + 1e-7\n D = np.power(D, -0.5).flatten()\n D = sp.diags(D)\n return D.dot(matrix).dot(D)", "def forward(self, user, item):\n item_emb = self.product_factors(item.view(-1)) + self.product_bias(\n item.view(-1)\n )\n user_emb = self.user_factors(user.view(-1)) + self.user_bias(user.view(-1))\n mat_mult = (item_emb * user_emb).sum(1)\n\n return mat_mult", "def affinity_matrix(test_specs):\n\n np.random.seed(test_specs[\"seed\"])\n\n # uniform probability for the 5 ratings\n s = [(1 - test_specs[\"spars\"]) / test_specs[\"ratings\"]] * test_specs[\"ratings\"]\n s.append(test_specs[\"spars\"])\n P = s[::-1]\n\n # generates the user/item affinity matrix. 
Ratings are from 1 to 5, with 0s denoting unrated items\n X = np.random.choice(\n test_specs[\"ratings\"] + 1, (test_specs[\"users\"], test_specs[\"items\"]), p=P\n )\n\n Xtr, Xtst = numpy_stratified_split(\n X, ratio=test_specs[\"ratio\"], seed=test_specs[\"seed\"]\n )\n\n return Xtr, Xtst", "def update_model(self):\n for itemidx, itemid in self._allitems.iterrows():\n self.__itemid2idx[str(itemid['itemid'])] = itemidx\n self.__itemidx2id[itemidx] = str(itemid['itemid'])\n for useridx, userid in enumerate(self._uservisits['userid'].unique()):\n self.__userid2idx[str(userid)] = useridx\n self.__useridx2id[useridx] = str(userid)\n userid = self._uservisits['userid'].values\n itemid = self._uservisits['itemid'].values\n rating = self._uservisits['rating'].values\n useridx = [self.__userid2idx[str(int(uid))] for uid in userid]\n itemidx = [self.__itemid2idx[str(int(iid))] for iid in itemid]\n rating = list(map(numpy.double, rating))\n self.__itemuser = csr_matrix((rating, (useridx, itemidx)), shape=(len(set(useridx)), len(set(itemidx))))\n self.__recommender = implicit.als.AlternatingLeastSquares(factors=self.__numtopics)\n self.__recommender.fit(self.__itemuser)", "def recommend_NMF(new_user,movies_num,movies_ratings):\n list_id_movies = movies_ratings['movieId'].unique()\n nmf,Q = load_NMF_model()\n new_user_vector = pd.DataFrame(new_user, index=list_id_movies).T\n new_user_vector_filled = new_user_vector.fillna(3)\n #calculate Matrix P (Genres)\n P = nmf.transform(new_user_vector_filled)\n #make predictions\n predictions = np.dot(P,Q)\n recommendations = pd.DataFrame(predictions.reshape(-1), index=list_id_movies).T\n #Remove already watched movies:\n not_watched_movies_mask = np.isnan(new_user_vector)\n not_watched = recommendations[not_watched_movies_mask]\n\n top_movies_ids = not_watched.T.sort_values(by=[0], ascending=False).index[:movies_num]\n\n Top_recommended = movieId_to_title(top_movies_ids,movies_ratings) \n return Top_recommended", "def fit(self, df):\n\n # generate continuous indices if this hasn't been done\n if self.index2item is None:\n self.set_index(df)\n\n logger.info(\"Collecting user affinity matrix\")\n if not np.issubdtype(df[self.col_rating].dtype, np.number):\n raise TypeError(\"Rating column data type must be numeric\")\n\n # copy the DataFrame to avoid modification of the input\n temp_df = df[[self.col_user, self.col_item, self.col_rating]].copy()\n\n if self.time_decay_flag:\n logger.info(\"Calculating time-decayed affinities\")\n # if time_now is None use the latest time\n if not self.time_now:\n self.time_now = df[self.col_timestamp].max()\n\n # apply time decay to each rating\n temp_df[self.col_rating] *= exponential_decay(\n value=df[self.col_timestamp],\n max_val=self.time_now,\n half_life=self.time_decay_half_life,\n )\n\n # group time decayed ratings by user-item and take the sum as the user-item affinity\n temp_df = (\n temp_df.groupby([self.col_user, self.col_item]).sum().reset_index()\n )\n else:\n # without time decay use the latest user-item rating in the dataset as the affinity score\n logger.info(\"De-duplicating the user-item counts\")\n temp_df = temp_df.drop_duplicates(\n [self.col_user, self.col_item], keep=\"last\"\n )\n\n logger.info(\"Creating index columns\")\n # map users and items according to the two dicts. 
Add the two new columns to temp_df.\n temp_df.loc[:, self.col_item_id] = temp_df[self.col_item].map(self.item2index)\n temp_df.loc[:, self.col_user_id] = temp_df[self.col_user].map(self.user2index)\n\n # retain seen items for removal at prediction time\n self.seen_items = temp_df[[self.col_user_id, self.col_item_id]].values\n\n # affinity matrix\n logger.info(\"Building user affinity sparse matrix\")\n self.user_affinity = self.compute_affinity_matrix(\n temp_df, self.n_users, self.n_items\n )\n\n # calculate item co-occurrence\n logger.info(\"Calculating item co-occurrence\")\n item_cooccurrence = self.compute_coocurrence_matrix(\n temp_df, self.n_users, self.n_items\n )\n\n # free up some space\n del temp_df\n\n self.item_frequencies = item_cooccurrence.diagonal()\n\n logger.info(\"Calculating item similarity\")\n if self.similarity_type == sar.SIM_COOCCUR:\n self.item_similarity = item_cooccurrence\n elif self.similarity_type == sar.SIM_JACCARD:\n logger.info(\"Calculating jaccard\")\n self.item_similarity = jaccard(item_cooccurrence).astype(\n df[self.col_rating].dtype\n )\n elif self.similarity_type == sar.SIM_LIFT:\n logger.info(\"Calculating lift\")\n self.item_similarity = lift(item_cooccurrence).astype(\n df[self.col_rating].dtype\n )\n else:\n raise ValueError(\n \"Unknown similarity type: {0}\".format(self.similarity_type)\n )\n\n # free up some space\n del item_cooccurrence\n\n logger.info(\"Done training\")", "def fit_data(self, matrix, user_features=None, item_features=None):\r\n matrix.sort_index(inplace=True)\r\n matrix.sort_index(inplace=True, axis=1)\r\n dataset = Dataset()\r\n dataset.fit((x for x in matrix.index),\r\n (x for x in matrix.columns))\r\n interactions = pd.melt(matrix.replace(0, np.nan).reset_index(),\r\n id_vars='index',\r\n value_vars=list(matrix.columns[1:]),\r\n var_name='plu_id',\r\n value_name='rating').dropna().sort_values('index')\r\n interactions.columns = ['crd_no', 'plu_id', 'rating']\r\n self.interactions, self.weights = dataset.build_interactions([tuple(x) for x in interactions.values])\r\n\r\n if user_features is not None:\r\n user_features.sort_index(inplace=True)\r\n dataset.fit_partial(users=user_features.index,\r\n user_features=user_features)\r\n self.user_features = dataset.build_user_features(\r\n ((index, dict(row)) for index, row in user_features.iterrows()))\r\n else:\r\n self.user_features = None\r\n if item_features is not None:\r\n item_features.sort_index(inplace=True)\r\n dataset.fit_partial(items=item_features.index,\r\n item_features=item_features)\r\n self.item_features = dataset.build_item_features(\r\n ((index, dict(row)) for index, row in item_features.iterrows()))\r\n else:\r\n self.item_features = None", "def __init__(self, ratings, rank, user_reg_loss, item_reg_loss):\n self._ratings = ratings\n self._users_num, self._items_num = ratings.shape\n self._rank = rank\n self._user_reg_loss = user_reg_loss\n self._item_reg_loss = item_reg_loss\n \n self._train_function = self._als_partial_step_explicit\n \n # DONE init latent factors for user and item matrix\n # losowo ustalamy inicjalne wartości X i Y\n self._user_factors = np.random.random((self._users_num, rank))\n self._item_factors = np.random.random((self._items_num, rank))", "def predict_rating(user_id,item_id):\n user_preference = latent_user_preferences[user_id]\n item_preference = latent_item_features[item_id]\n return user_preference.dot(item_preference)", "def compute_affinity_matrix(self, df, n_users, n_items):\n\n return sparse.coo_matrix(\n (df[self.col_rating], 
(df[self.col_user_id], df[self.col_item_id])),\n shape=(n_users, n_items),\n ).tocsr()", "def init_MF(train, num_features):\n num_user = train.shape[1]\n num_item = train.shape[0]\n user_features = np.random.rand(num_features,num_user) # user_features shape (20,943)\n item_features = np.random.rand(num_item, num_features) # item_features shape (1152,20)\n return user_features, item_features", "def feature_matrix(df, user_id=None, item_id=None):\n print(\"get feature matrix\")\n df1 = df.drop_duplicates(subset=['user_id'], keep='first', inplace=False)\n user_x = None\n if user_id is not None:\n user_x = int(np.argwhere(df1['user_id'].values == user_id))\n user_features = df1[['average_stars']].values\n csr_user_features = sparse.csr_matrix(user_features)\n\n df2 = df.drop_duplicates(\n subset=['business_id'],\n keep='first',\n inplace=False)\n item_x = None\n if item_id is not None:\n item_x = int(np.argwhere(df2['business_id'].values == item_id))\n item_features = df2.iloc[:, 10:].values\n\n csr_item_features = sparse.csr_matrix(item_features)\n return csr_user_features, csr_item_features, user_x, item_x", "def simMatrix(self, d = 1/5):\n \n self.fit_baseline(d)\n self.evalBaseline()\n \n \n df_mat = np.array(self.df[[\"user ind\", \"item ind\", \"rating\"]].merge(self.r_b, on = [\"user ind\", \"item ind\"]))\n df_ind = df_mat[:,:2].astype(int)\n df_rat = df_mat[:,2] - df_mat[:,3]\n \n \n self.M = np.zeros((self.n_us, self.n_it))\n \n \n widgets = ['Test: ', Percentage(), ' ', Bar(\"#\"), ' ', ETA()]\n pbar = ProgressBar(widgets = widgets, maxval = self.n_us)\n pbar.start()\n \n for us in self.user_ind:\n it = df_ind[np.where(df_ind[:,0] == us)[0], 1]\n rat1 = df_rat[np.where(df_ind[:,0] == us)[0]]\n self.M[us,it] = rat1\n \n pbar.update(us)\n \n pbar.finish()\n \n #self.M = self.UI.toarray()\n pbar = ProgressBar(widgets = widgets, maxval = self.n_it * (self.n_it - 1) / 2)\n pbar.start()\n \n self.S = np.empty((self.n_it, self.n_it)) * np.nan\n \n for i1 in range(self.n_it):\n # self.S[i1,i1] = 1\n x1 = self.M[:,i1]\n for i2 in range(i1+1,self.n_it):\n x2 = self.M[:,i2]\n I = np.logical_and(x1, x2)\n if (len(I) > 1):\n self.S[i1,i2] = self.S[i2,i1] = Sim.cos2(x1.T[I], self.M[:,i2].T[I])\n \n pbar.update((self.n_it)*(i1+1) - (i1+2)*(i1+1)/2)\n \n pbar.finish()\n \n return self.S", "def score_items(X, U, mu,\n scoremethod='lowhigh',\n missingmethod='none',\n feature_weights=[]):\n\n # Use U to model and then reconstruct the data in X.\n # 1. Project all data in X into space defined by U,\n # then reconstruct it.\n if missingmethod.lower() != 'ignore':\n # All missing values should have been replaced with 0,\n # or non-existent.\n # 1a. Subtract the mean and project onto U\n proj = np.dot(U.T, (X - mu))\n # 1b. Reconstruct by projecting back up and adding mean\n reproj = np.dot(U, proj) + mu\n # 1c. Compute the residual\n #print('X:', X.T)\n #print('reproj:', reproj.T)\n err = X - reproj\n #print('err:', err.T)\n #input()\n \n else:\n # Missing method must be 'ignore' (Brand 2002)\n (err, reproj) = compute_error_with_missing(X, U, mu)\n\n # 2. 
Compute reconstruction error\n if scoremethod == 'low': # Blank out all errors > 0\n err[err>0] = 0\n elif scoremethod == 'high': # Blank out all errors < 0\n err[err<0] = 0\n else: # default, count everything\n pass\n \n # Weight features if requested\n if len(feature_weights) > 0:\n for i in range(len(feature_weights)):\n err[i,:] = err[i,:] * feature_weights[i]\n\n if missingmethod.lower() == 'ignore':\n # Only tally error for observed features.\n # This means that items with missing values are not penalized\n # for those features, which is probably the best we can do.\n scores = np.nansum(np.array(np.power(err, 2)), axis=0)\n else:\n scores = np.sum(np.array(np.power(err, 2)), axis=0)\n\n #print('scores:', scores)\n #print('reproj:', reproj)\n #input()\n return (scores, reproj)", "def ratings_to_matrix(ratings_df, user_col, item_col, rating_col, forced_shape=None):\n users_num = ratings_df.user_id.max() + 1\n items_num = ratings_df.item_id.max() + 1\n \n if forced_shape:\n users_num = max(users_num, forced_shape[0])\n items_num = max(items_num, forced_shape[1])\n \n ratings_mat = np.zeros([users_num, items_num])\n for rating in ratings_df.itertuples():\n ratings_mat[rating[user_col], rating[item_col]] = rating[rating_col]\n \n return ratings_mat", "def __init__(self, user_factors, item_factors):\n self._user_factors = np.copy(user_factors)\n self._item_factors = np.copy(item_factors)\n \n self._users_num = user_factors.shape[0]\n self._items_num = item_factors.shape[0]\n\n assert user_factors.shape[1] == item_factors.shape[1]", "def fit(self, ratings):\n # Training proceeds in 2 steps:\n # 1. Normalize item vectors to be mean-centered and unit-normalized\n # 2. Compute similarities with pairwise dot products\n self._timer = util.Stopwatch()\n\n init_rmat, users, items = matrix.sparse_ratings(ratings)\n n_items = len(items)\n _logger.info('[%s] made sparse matrix for %d items (%d ratings from %d users)',\n self._timer, len(items), init_rmat.nnz, len(users))\n\n rmat, item_means = self._mean_center(ratings, init_rmat, items)\n\n rmat = self._normalize(rmat)\n\n _logger.info('[%s] computing similarity matrix', self._timer)\n smat = self._compute_similarities(rmat)\n\n _logger.info('[%s] got neighborhoods for %d of %d items',\n self._timer, np.sum(np.diff(smat.rowptrs) > 0), n_items)\n\n _logger.info('[%s] computed %d neighbor pairs', self._timer, smat.nnz)\n\n self.item_index_ = items\n self.item_means_ = item_means\n self.item_counts_ = np.diff(smat.rowptrs)\n self.sim_matrix_ = smat\n self.user_index_ = users\n self.rating_matrix_ = init_rmat\n\n return self", "def important_factors_based_on_ratings(data: pd.DataFrame) -> np.ndarray:\n # Turn labels into binary classification for equal class distribution\n data = utils.add_ratings_binary(data)\n # Get feature and label data for classifcation from original dataset\n X, y = utils.get_rating_features_labels(data)\n\n # Grab features from feature matrix\n features = X.columns\n\n # split data into train and test set\n X_train, X_test, y_train, y_test = model_selection.train_test_split(X.values, y, test_size=0.2) \n\n # Instantiate and train xgboost model for rating classfication\n xgb_model = xgboost.XGBClassifier()\n xgb_model.fit(X_train, y_train)\n\n # Grab feature importance scores from trained model\n feature_importance = xgb_model.feature_importances_\n # Find indices of top 2 important features\n top_important_features_ind = np.argpartition(feature_importance, -2)[-2:]\n\n print(f\"The top 2 important features are 
{features[top_important_features_ind]}\")\n\n return feature_importance", "def get_user_feature_matrix(user_dict, user_index, aspect_index, N):\n result = np.zeros((len(user_index), len(aspect_index)))\n for key in user_dict.keys():\n index_user = user_index[key]\n user_reviews = user_dict[key]\n count_dict = {}\n for review in user_reviews:\n feature = review[0]\n if feature not in aspect_index:\n continue\n aspect = aspect_index[feature]\n if aspect not in count_dict:\n count_dict[aspect] = 0;\n count_dict[aspect] += 1\n for aspect in count_dict.keys():\n count = count_dict[aspect]\n result[index_user, aspect] = 1 + (N - 1) * (2 / (1 + exp(-count)) - 1)\n return result", "def personalization(prediction, n):\n # prediction\n # n top n recommendation\n\n top_n = get_top_n(prediction, n)\n\n rec_dict = {}\n for uid, user_ratings in top_n.items():\n rec_dict[uid] = [iid for (iid, _) in user_ratings]\n\n rec_user_ls = [pred[0] for pred in prediction]\n rec_item_ls = [pred[1] for pred in prediction]\n\n unique_rec_user_ls = np.unique(rec_user_ls)\n unique_rec_item_ls = np.unique(rec_item_ls)\n\n # assign each item with index number\n unique_rec_item_dict = {item: ind for ind,\n item in enumerate(unique_rec_item_ls)}\n\n n_unique_rec_user = len(unique_rec_user_ls)\n n_unique_rec_item = len(unique_rec_item_ls)\n\n # recommended user item matrix\n rec_matrix = np.zeros(shape=(n_unique_rec_user, n_unique_rec_item))\n\n # represent recommended item for each user as binary 0/1\n for user in range(n_unique_rec_user):\n # get userid\n user_id = unique_rec_user_ls[user]\n # get rec item list\n item_ls = rec_dict[user_id]\n\n for item_id in item_ls:\n # get item index\n item = unique_rec_item_dict[item_id]\n rec_matrix[user, item] = 1\n\n # calculate cosine similarity matrix across all user recommendations\n similarity = cosine_similarity(X=rec_matrix, dense_output=False)\n # calculate average of upper triangle of cosine matrix\n upper_right = np.triu_indices(similarity.shape[0], k=1)\n # personalization is 1-average cosine similarity\n score = 1 - np.mean(similarity[upper_right])\n return score", "def update_item_feature(\n train, item_features, user_features, lambda_item,\n nz_item_userindices, I):\n for n, item_n in enumerate(nz_item_userindices):\n nnz_users_per_item = len(item_n[1]) # Number of users who rated item n\n if (nnz_users_per_item == 0): nnz_users_per_item = 1\n # Least squares solution\n A_n = np.dot(user_features[:,item_n[1]], user_features[:,item_n[1]].T) + lambda_item * nnz_users_per_item * I\n V_n = np.dot(user_features[:,item_n[1]], train.T[item_n[1],item_n[0]].todense())\n #if (n%3 == 0): print(\"item_n: {}\".format(item_n[0]), np.linalg.det(A_n))\n if (np.linalg.det(A_n) != 0): item_features.T[:,item_n[0]] = np.linalg.solve(A_n,V_n)\n else: \n A_n[0,0] += 1; A_n[1,1] += 1; A_n[2,2] += 1; A_n[3,3] += 1; A_n[4,4] += 1; A_n[5,5] += 1 # if matrix A_n is singular, slightly modify several values\n item_features.T[:,item_n[0]] = np.linalg.solve(A_n,V_n)", "def item_user_matrix(X):\n X['user_id'] = X['user_id'].astype(\"category\")\n X['song_id'] = X['song_id'].astype(\"category\")\n\n row = X['song_id'].cat.codes.copy()\n col = X['user_id'].cat.codes.copy()\n\n nrow = len(X['song_id'].cat.categories)\n ncol = len(X['user_id'].cat.categories)\n\n item_user = csr_matrix((X['score'], (row, col)), shape=(nrow, ncol))\n\n user = dict(enumerate(X['user_id'].cat.categories))\n user_index = {u: i for i, u in user.items()}\n\n item = dict(enumerate(X['song_id'].cat.categories))\n item_index = 
{s: i for i, s in item.items()}\n\n return item_user, item_index, user_index" ]
[ "0.6282623", "0.62621325", "0.60587424", "0.6045789", "0.6040702", "0.6002245", "0.5951851", "0.59179777", "0.59059614", "0.58943605", "0.589419", "0.587945", "0.5858747", "0.58264637", "0.5798391", "0.57919794", "0.574544", "0.5737683", "0.5693354", "0.5668427", "0.56560904", "0.565145", "0.5631024", "0.5595122", "0.55878645", "0.5560108", "0.55080867", "0.5492173", "0.5471623", "0.54714626" ]
0.72323316
0
Get list of Domains for this API key.
def get_domains() -> List[str]: ret = _call_endpoint("v1/domains") # Example response: # [{'createdAt': '2016-06-25T03:08:44.000Z', # 'domain': 'mydomain.com', # 'domainId': 12345678, # 'expirationProtected': False, # 'expires': '2020-06-25T03:08:44.000Z', # 'holdRegistrar': False, # 'locked': True, # 'nameServers': None, # 'privacy': False, # 'renewAuto': True, # 'renewDeadline': '2020-08-09T03:08:44.000Z', # 'renewable': True, # 'status': 'ACTIVE', # 'transferProtected': False},] domains = [d["domain"] for d in ret] return domains
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_domains(self):\n\n response = self.call(method='getDomains')\n domains = []\n for d in response:\n domain = self.domain(domain=d['domain'])\n domains.append(domain)\n return domains", "def listDomains(self):\n reply = self.rpc.getDomains(self.username,\n self.password)\n if reply[0] == 'UNKNOWN_ERROR':\n raise Exception(\"RPC returned error: \" + reply[0])\n return reply", "def domains(self):\n return DomainCollection(self.request)", "def list_domain_names(self) -> Dict:\n pass", "def tracking_domain_list(self):\r\n params = base.get_params(None, locals())\r\n return self._get('tracking_domain_list', params)", "def ListDomains(self, perPage=0, page=1):\n\n class Result(Model):\n domains = ListField(ModelField(Domain))\n\n if perPage != 0:\n headers = {\"perPage\": perPage, \"page\": page}\n response = self.client.http_get(\"/v4/domains\", headers)\n else:\n response = self.client.http_get(\"/v4/domains\")\n\n return parse_response(response, Result)", "def getDomains(self, company):\n return self.db.getDomains(company)", "def list_domain(self, feed_id=None):\n resources = self.list_resource(feed_id=feed_id, resource_type_id='Host Controller')\n domains = []\n if resources:\n for resource in resources:\n resource_data = self.get_config_data(\n feed_id=resource.path.feed_id, resource_id=resource.id)\n domain_data = resource_data.value\n domains.append(Domain(resource.id, resource.name, resource.path, domain_data))\n return domains", "def list_domain(self, feed_id=None):\n domains = self.list_resource(feed_id=feed_id,\n resource_type_id='Domain Host',\n cls=Domain,\n list_children=True,\n include_data=True)\n return domains", "def domains(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"domains\")", "def list_keystone_v3_domains(self):\n LOG_OBJ.debug(\"List the domains.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/domains\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating domain\")\n print (\"No response from Server while creating domain\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Listing domains Failed with status %s \"\n \"and error : %s\" % response.status, response.data)\n print (\" Listing domains Failed with status %s and error : %s\" %\n response.status, response.data)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Domains list : %s \" % output)\n print (\"Domains list : %s \" % output)\n return output['domains']", "def domains(cls):\n return [cls.domain]", "def list_zones(self, **kwargs):\r\n return self.client['Account'].getDomains(**kwargs)", "def domains(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"domains\")", "def domain_list_all(self):\n page = 1\n on_page = 100\n ret = []\n while True:\n r = self.domain_list(page=page, on_page=on_page)\n ret += r['domains']\n if len(ret) >= r['total']:\n break\n page += 1\n return ret", "def domain(self):\n return self.keys()", "def get_search_domains(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfg_GetSearchDomains', self.handle))", "def domains(self) -> pulumi.Input[Sequence[pulumi.Input['ElastigroupIntegrationRoute53DomainArgs']]]:\n return pulumi.get(self, \"domains\")", "def list_domain_names():\n pass", "def list(self, domain):\n return request(\n 
API_LIST.DNS_LIST.value,\n {\n 'email': self.email,\n 'token': self.token,\n 'domain': domain\n }\n )", "def cb_listdomains(self, cmd):\n for cur in sorted(self.d.listDomains(),\n key=lambda x: _domreverse(x['domain'])):\n print \"%(domain)60s %(expiration_date)15s\" % cur", "def get_subdomains(self):\n\n response = self.call(method='getSubdomains', args=[self.domainname])\n subdomains = []\n for s in response:\n subdomain = self.subdomain(domain=self.domainname, subdomain=s)\n subdomains.append(subdomain)\n return subdomains", "def list_all():\n\n return (_conn.listDefinedDomains() +\n [_conn.lookupByID(id).name() for id in _conn.listDomainsID()])", "def get(self, api_key):\n\n try:\n mailgun.list_domains(api_key)\n return {\"api_key\": api_key, \"valid\": True}\n except:\n return {\"api_key\": api_key, \"valid\": False}", "def get_search_domains(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrvCfgNet_GetSearchDomains', self.handle))", "def AllowedDomains(self)->list:\n return self._allowedDomains", "def _get_domain(self):\n self.ensure_one()\n domain = []\n return domain", "def domains(cls):\n return (cls.domain, )", "def domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"domains\")", "def get_dns_list(self):\n return self.get_ipv4_dns_list()" ]
[ "0.82744664", "0.76725835", "0.7275466", "0.72130734", "0.7188468", "0.7123102", "0.7106527", "0.71019924", "0.7034431", "0.70254606", "0.7023339", "0.69961786", "0.6931258", "0.69296205", "0.6853687", "0.6843187", "0.6822089", "0.6772705", "0.6753809", "0.6742991", "0.6693973", "0.6681079", "0.66689324", "0.66550237", "0.66511244", "0.6635103", "0.6612865", "0.658361", "0.6566474", "0.65224487" ]
0.78274804
1
Get DNS entries for a specific domain
def get_domain_dns_records(domain): url_suffix = "v1/domains/{}/records".format(domain) ret = _call_endpoint(url_suffix) if isinstance(ret, dict) and ret.get('code', None) == "UNKNOWN_DOMAIN": # e.g. {'code': 'UNKNOWN_DOMAIN', 'message': 'The given domain is not registered, or does not have a zone file'} raise Exception(f"Can't find domain {domain}. Are you sure your API key and secret are correct?: {ret}") return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list(self, domain):\n return request(\n API_LIST.DNS_LIST.value,\n {\n 'email': self.email,\n 'token': self.token,\n 'domain': domain\n }\n )", "def print_all_dns_records():\n for domain in sorted(get_domains()):\n dns_records = get_domain_dns_records(domain)\n print(domain)\n pprint(dns_records)\n print(\"*\" * 50)\n # TODO: poor man's rate limiter. improve?\n time.sleep(2)", "def __resolve_domain(self, domain=''):\n _ip = []\n if self.__is_ip_address(domain):\n # print hostname + \" is IP address\"\n _ip.append(domain)\n return _ip\n r = dns.resolver.get_default_resolver()\n r.nameservers = ['8.8.8.8']\n #answers = dns.resolver.query(hostname, 'A')\n try:\n answers = r.query(domain, 'A')\n for rdata in answers:\n # print rdata.address\n _ip.append(rdata.address)\n except dns.resolver.NoAnswer:\n print \"no answer\"\n\n if domain.find(\"www.\") != 0:\n domain = \"www.\" + domain\n # print \"querying \" + hostname\n try:\n answers = dns.resolver.query(domain, 'A')\n for rdata in answers:\n # print rdata.address\n _ip.append(rdata.address)\n except dns.resolver.NoAnswer:\n print \"no answer\"\n # print(\"processed %s, it has %d ips.\" % (hostname, len(_ip)))\n\n return list(set(_ip))", "def get(domain_name=None):\n url = 'https://api.cloudns.net/dns/soa-details.json'\n\n params = Parameters({'domain-name': domain_name})\n\n return requests.get(url, params=params.to_dict())", "def infoDnsRecords(self, domainname: str) -> DNSRecordSet:\n response = self._send(self.nc_request(action=\"infoDnsRecords\", parameters={\"domainname\": domainname}))\n\n # build records\n rset = DNSRecordSet(dnsrecords=[])\n for r in response[\"dnsrecords\"]:\n dr = DNSRecord(id=int(r[\"id\"]),\n hostname=r[\"hostname\"],\n type=r[\"type\"],\n priority=int(r[\"priority\"]),\n destination=r[\"destination\"],\n deleterecord=r[\"deleterecord\"],\n state=r[\"state\"])\n\n rset.dnsrecords.append(dr)\n\n return rset", "def dns(self, **kwargs):\n self.logger.debug(f\"Get RealTime DNS data\")\n url_path = 'dns'\n body = self._make_body(kwargs)\n return self._common_post(request_path=url_path, body=body)", "def getIPs(self, domain = \"localhost\"):\n # convert 'domain' to string, in case of erroneous type being passed\n domain = str(domain)\n\n # Kind warning for those who entered an IP address instead of a domain\n try: \n inet_aton(domain)\n print(\"Warning: an IP address was given instead of a domain name.\")\n except:\n pass\n\n # Try to query DNS records to populate A-Record IP list\n # Prints errors and returns None if exceptions found\n try:\n iplist = gethost(domain)[2]\n except gaierror as ge:\n if ge.errno == -2:\n print(\"Error: Domain '{}' invalid, or unknown. \"\\\n \"Please check proper spelling and format.\\n\"\\\n \"(e.g.: python dns_get_A_record_IPs.py google.com )\".format(domain))\n elif ge.errno == -3:\n print(\"Error: Domain '{}' unreachable. 
Please check your connection.\".format(domain))\n return None\n except timeout:\n print(\"Error: Connection to {} timed out.\".format(domain))\n return None\n\n return iplist", "def gethostbyname(self, hostname, dnsserv='192.112.36.4'):\n ipaddrlist = []\n cnames = []\n temp = []\n if(self.caching):\n rcache = RecordCache(self.ttl)\n rcord = rcache.lookup(hostname, Type.ANY, Class.IN)\n if(rcord):\n for rec in rcord:\n if rec.type_ == Type.A:\n arec = rec.rdata\n ipaddrlist.append(arec.address)\n elif rec.type_ == Type.CNAME:\n crec = rec.rdata\n cnames.append(crec.cname)\n if ipaddrlist:\n return hostname, cnames, ipaddrlist\n elif cnames:\n return self.gethostbyname(cnames[0], dnsserv)\n \n\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n sock.settimeout(self.timeout)\n\n # Create and send query\n question = Question(Name(str(hostname)), Type.A, Class.IN)\n header = Header(9001, 0, 1, 0, 0, 0)\n header.qr = 0\n header.opcode = 0\n header.rd = 1\n query = Message(header, [question])\n sock.sendto(query.to_bytes(), (str(dnsserv), 53))\n\n # Receive response\n data = sock.recv(2048)\n response = Message.from_bytes(data)\n print(\"Number of answers: \" +str(len(response.answers)))\n print(\"Number of authorities: \" + str(len(response.authorities)))\n print(\"Number of additionals: \" + str(len(response.additionals)))\n\n # Get data\n aliaslist = cnames\n ipaddrlist = []\n dnslist = []\n \n while response.answers:\n for answer in response.answers:\n if answer.type_ == Type.A:\n print(\"found A RR\")\n if(self.caching):\n rcache.add_record(answer)\n ipaddrlist.append(answer.rdata.address)\n if answer.type_ == Type.CNAME:\n aliaslist.append(answer.rdata.cname)\n if answer.type_ == Type.NS:\n dnslist.append(answer.rdata.nsdname)\n if ipaddrlist:\n return hostname, aliaslist, ipaddrlist\n elif aliaslist:\n question = Question(Name(aliaslist[0]), Type.A, Class.IN)\n query = Message(header, [question])\n sock.sendto(query.to_bytes(), (dnsserv, 53))\n data = sock.recv(2048)\n response = Message.from_bytes(data)\n elif dnslist:\n nsname = dnslist.pop()\n maybe_dnsserv = self.getnsaddr(nsname, response.additionals)\n if maybe_dnsserv:\n dnsserv = maybe_dnsserv\n else:\n pass\n sock.sendto(query.to_bytes(), (dnsserv, 53))\n data = sock.recv(2048)\n response = Message.from_bytes(data)\n else:\n break\n\n if response.authorities:\n for authority in response.authorities:\n if authority.type_ != Type.NS:\n pass\n dnslist.append(authority.rdata.nsdname)\n while dnslist:\n nsname = dnslist.pop()\n maybe_next_dnsserv = self.getnsaddr(nsname, response.additionals)\n if maybe_next_dnsserv:\n next_dns_serv = maybe_next_dnsserv\n else:\n pass\n (hname, aliasl, ipaddrl) = self.gethostbyname(hostname, nsname)\n if ipaddrl:\n return hname, aliasl, ipaddrl", "def get_dns_list(self):\n return self.get_ipv4_dns_list()", "def list_domain_names():\n pass", "def cb_listdomains(self, cmd):\n for cur in sorted(self.d.listDomains(),\n key=lambda x: _domreverse(x['domain'])):\n print \"%(domain)60s %(expiration_date)15s\" % cur", "def get_dns_records_from_godaddy(self) -> list:\n\n headers = {\"Authorization\": \"sso-key {}:{}\".format(self.api_key, self.secret_key)}\n dns_records = []\n for dns_record in self.dns_records:\n url = \"https://api.godaddy.com/v1/domains/{}/records/{}/{}\".format(dns_record[\"domain\"],\n dns_record[\"dns_record_type\"],\n dns_record[\"name\"])\n dns_records.append(get(url, headers=headers).text)\n return dns_records", "def search(url, domain_list):\n resp = requests.get(url)\n if not 
resp.json().get('hits', '').get('hits', []):\n return\n for hit in resp.json()[\"hits\"][\"hits\"]:\n domain = hit.get(\"_source\", {}).get(\"domain\", \"\")\n if not domain:\n continue\n if not domain in domain_list:\n domain_list.append(domain)\n #print(hit[\"_source\"].get(\"title\", \"\").encode(\"ascii\",\"ignore\"))\n if domain not in ALLOWED_DOMAINS:\n print(domain)", "def cli(ctx, domain, ip_address, hostname):\n zone = getzone(domain)\n #print('.%s:%s:%s' % (domain, ip_address, hostname))\n for r in zone:\n if r['type'] == 'CNAME':\n print('C%s:%s' %( r['name'], r['content']))\n elif r['type'] == 'TXT':\n print('\\'%s:%s' %( r['name'], r['content']))\n elif r['type'] == 'MX':\n pass\n elif r['type'] == 'A':\n print('=%s:%s' %( r['name'], r['content']))\n else:\n exit('unknown DNS record type: %s' % r['type'])", "def create_dns_dictionary(self, path_tracefile):\n responses = self.get_dns_responses(path_tracefile)\n dns_dict = dict()\n for response in responses:\n for x in range(response[DNS].ancount): # answer count, how many IP adresses are returned for the query\n try: # answer count could also include 'DNS SRV Resource Record' which does not have a 'rrname' attribute so ancount is wrong if there is such a record -> TODO get amount of DNSRR instead of using ancount\n domain = getattr(response[DNSRR][x], 'rrname').decode(\"utf-8\") # domain (this is returned in bytes so decode)\n ip = getattr(response[DNSRR][x], 'rdata') # IP adres of the domain, TODO make this work for multiple ip adresses for one domain (Test with [0] at end)\n dns_dict[ip] = domain[:-1] #remove last char '.' \n except:\n continue\n return dns_dict", "def query_dns_records(event, context):\n ids = ['SOA', 'TXT', 'MX', 'NS', 'DNSKEY']\n dn = event['queryStringParameters'][query_parameter].lower()\n body = {'scanDate': (datetime.datetime.now(datetime.timezone.utc) +\n datetime.timedelta(hours=8)).isoformat().upper()[:26],\n 'scanRecordTypes': ids,\n 'domain': dn,\n 'records': {}}\n\n try:\n try:\n for record_type in ids:\n try:\n answers = dns.resolver.query(dn, record_type)\n records = []\n for data in answers:\n records.append(data.to_text())\n body['records'][record_type] = records\n except (dns.resolver.NoAnswer, dns.resolver.NoNameservers, dns.exception.Timeout):\n pass # might fail per record_type, perfectly fine\n\n # insert into DynamoDB\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table(table_name)\n table.put_item(Item=body)\n status_code = 200\n result = json.dumps(body)\n\n except dns.resolver.NXDOMAIN:\n status_code = 404 # domain no longer exists, or domain not found :)\n result = ''\n\n except KeyError: # insufficient queryStringParameters\n status_code = 400\n result = ''\n\n return {'statusCode': status_code,\n 'headers': headers,\n 'body': result}", "def domain_command():\n # 1. Get input host from Demisto\n domain = demisto.args().get('domain')\n # 2. Get the host reputation from SlashNext API\n response = domain_lookup(domain=domain)\n if response.get('errorNo') != 0:\n return\n # 3. 
Parse and format the response\n dbot_score_cont, domain_cont = get_dbot_std_context(\n domain, 'Domain', response.get('threatData').get('verdict'), response.get('threatData').get('threatType'))\n\n snx_ioc_cont = get_snx_host_ioc_context(domain, 'Domain', response.get('threatData'))\n\n ec = {\n 'SlashNext.Domain(val.Value === obj.Value)': snx_ioc_cont,\n 'DBotScore': dbot_score_cont,\n 'Domain': domain_cont\n }\n\n domain = domain.encode('idna')\n\n title = 'SlashNext Phishing Incident Response - Domain Lookup\\n' \\\n '##### domain = {}'.format(domain.decode())\n\n md = tableToMarkdown(\n title,\n snx_ioc_cont,\n ['Value',\n 'Type',\n 'Verdict',\n 'ThreatStatus',\n 'ThreatName',\n 'ThreatType',\n 'FirstSeen',\n 'LastSeen']\n )\n\n return_outputs(md, ec, snx_ioc_cont)", "def get_domains(filename):\n with open(filename, 'r') as file:\n result = []\n for line in file.readlines():\n domain = line.strip()[1:]\n result.append(domain)\n return result", "def domain_lookup(domain):\n # Create the required data dictionary for Host/Reputation\n api_data = {\n 'host': domain\n }\n response = http_request(endpoint=HOST_REPUTE_API, data=api_data)\n\n if response.get('errorNo') != 0:\n return_error('API Returned, {}:{}'.format(response.get('errorNo'), response.get('errorMsg')))\n\n return response", "def get_urls(self, queries):\n domains = defaultdict(list)\n for q in queries:\n q = \"\\\"\" + q + \"\\\"\"\n results = self.engine.search(q)\n\n for result in results: \n url = result.url\n domain = self.get_domain(url)\n domains[domain].append(q) \n return domains", "def get_domain(id):\n return query(WEB_EXAMPLE_BASE + f\"/classical/domain/{id}\")", "def fastlydomain(args):\n pprint(api.domain(service_id, args[0], args[1]).attrs)", "def dict_of_domains(fc):\r\n # need to find root database (GDB or SDE)\r\n db_root = os.path.dirname(fc)\r\n while db_root[-4:].lower() != '.gdb' and db_root[-4:].lower() != '.sde':\r\n old_db_root = db_root # protect against infinite loop\r\n db_root = os.path.dirname(db_root)\r\n if old_db_root == db_root: # protect against infinite loop\r\n break\r\n arcpy.AddMessage(\"Retrieving Domains from \" + str(db_root))\r\n return {domain.name: domain.codedValues for domain in arcpy.da.ListDomains(db_root)}", "def get_ds(self, domain: str, as_json: bool = False):\n formatted_answer = {'domain': domain, 'rr_types': [\"ds\"], 'answer': None}\n\n status, result = Resolver.ctx_dnssec.resolve(domain, rrtype=ub.RR_TYPE_DS)\n\n if status == 0 and result.havedata:\n print(\"ds record returned.\")\n formatted_answer['answer'] = {}\n ds_records_list = result.data.data\n i = 0\n for ds in ds_records_list:\n if as_json:\n formatted_answer['answer'][i] = str(ds)\n else:\n formatted_answer['answer'][i] = ds\n i += 0\n elif status != 0: # throw/raise error\n print(\"Resolve error: \", ub.ub_strerror(status))\n elif result.havedata == 0: # if no data in result\n print(\"No data.\")\n if as_json:\n return json.dumps(formatted_answer)\n return DNSFormattedResponse(formatted_answer)", "def get_dns(self):\n dns = []\n for id, user in self.users_by_id.items():\n if not user.dns:\n continue\n for dn in user.dns:\n dns.append(dn)\n return dns", "def fetch_domain_certs(domain):\n url = BASE_URL.format(domain)\n result = requests.get(url)\n if result.status_code != 200:\n result.raise_for_status()\n return result.json()", "def info(self):\n\n return self.call(method='getDomain', args=[self.domainname])", "def get_domains(self):\n\n response = self.call(method='getDomains')\n domains = []\n for d in 
response:\n domain = self.domain(domain=d['domain'])\n domains.append(domain)\n return domains", "def list_keystone_v3_domains(self):\n LOG_OBJ.debug(\"List the domains.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/domains\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating domain\")\n print (\"No response from Server while creating domain\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Listing domains Failed with status %s \"\n \"and error : %s\" % response.status, response.data)\n print (\" Listing domains Failed with status %s and error : %s\" %\n response.status, response.data)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Domains list : %s \" % output)\n print (\"Domains list : %s \" % output)\n return output['domains']", "def list_domain_names(self) -> Dict:\n pass" ]
[ "0.754245", "0.69325036", "0.6888839", "0.6870619", "0.68629503", "0.6824631", "0.67168874", "0.66804457", "0.6670255", "0.6651589", "0.664277", "0.66395545", "0.66219234", "0.6609759", "0.6591403", "0.65406996", "0.6507059", "0.64893925", "0.647041", "0.64417464", "0.6440061", "0.64374006", "0.6430975", "0.63384515", "0.6335258", "0.63166475", "0.63051164", "0.63046676", "0.6266443", "0.6263354" ]
0.7584279
0
Print each domain and its DNS records (for domains linked to this API key).
def print_all_dns_records(): for domain in sorted(get_domains()): dns_records = get_domain_dns_records(domain) print(domain) pprint(dns_records) print("*" * 50) # TODO: poor man's rate limiter. improve? time.sleep(2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cb_listdomains(self, cmd):\n for cur in sorted(self.d.listDomains(),\n key=lambda x: _domreverse(x['domain'])):\n print \"%(domain)60s %(expiration_date)15s\" % cur", "def cli(ctx, domain, ip_address, hostname):\n zone = getzone(domain)\n #print('.%s:%s:%s' % (domain, ip_address, hostname))\n for r in zone:\n if r['type'] == 'CNAME':\n print('C%s:%s' %( r['name'], r['content']))\n elif r['type'] == 'TXT':\n print('\\'%s:%s' %( r['name'], r['content']))\n elif r['type'] == 'MX':\n pass\n elif r['type'] == 'A':\n print('=%s:%s' %( r['name'], r['content']))\n else:\n exit('unknown DNS record type: %s' % r['type'])", "def get_dns_records_from_godaddy(self) -> list:\n\n headers = {\"Authorization\": \"sso-key {}:{}\".format(self.api_key, self.secret_key)}\n dns_records = []\n for dns_record in self.dns_records:\n url = \"https://api.godaddy.com/v1/domains/{}/records/{}/{}\".format(dns_record[\"domain\"],\n dns_record[\"dns_record_type\"],\n dns_record[\"name\"])\n dns_records.append(get(url, headers=headers).text)\n return dns_records", "def get_domain_dns_records(domain):\n url_suffix = \"v1/domains/{}/records\".format(domain)\n ret = _call_endpoint(url_suffix)\n if isinstance(ret, dict) and ret.get('code', None) == \"UNKNOWN_DOMAIN\":\n # e.g. {'code': 'UNKNOWN_DOMAIN', 'message': 'The given domain is not registered, or does not have a zone file'}\n raise Exception(f\"Can't find domain {domain}. Are you sure your API key and secret are correct?: {ret}\")\n return ret", "def print_domain(self):\n print('\\n*****************')\n print('DOMAIN: ' + self.domain)\n print('REQUIREMENTS: ' + str(self.requirements))\n print('TYPES: ' + str(self.types))\n print('PREDICATES: ' + str(self.predicates))\n print('ACTIONS: ' + str(self.actions))\n print('FUNCTIONS: ' + str(self.functions))\n print('CONSTANTS: ' + str(self.constants))\n print('****************')", "def list(self, domain):\n return request(\n API_LIST.DNS_LIST.value,\n {\n 'email': self.email,\n 'token': self.token,\n 'domain': domain\n }\n )", "def show_domains(self):\n show_domains(self.system.cavity_gri)", "def test_getdnsrecords(self, kasserver):\n assert kasserver.get_dns_records(\"example.com\") == self.RESPONSE_PARSED", "def list_domain_names(self) -> Dict:\n pass", "def list_domain_names():\n pass", "def get_dns_list(self):\n return self.get_ipv4_dns_list()", "def list_type_A_domain(self, domain):\n r53 = self.connections.get_route53()\n # Get Zone ID\n zone = r53.get_zone(domain)\n zone_id = zone.id\n # Get all type A records\n records = r53.get_all_rrsets(hosted_zone_id=zone_id, name='A')\n for record in records:\n print(record)", "def get_dns(self):\n dns = []\n for id, user in self.users_by_id.items():\n if not user.dns:\n continue\n for dn in user.dns:\n dns.append(dn)\n return dns", "def infoDnsRecords(self, domainname: str) -> DNSRecordSet:\n response = self._send(self.nc_request(action=\"infoDnsRecords\", parameters={\"domainname\": domainname}))\n\n # build records\n rset = DNSRecordSet(dnsrecords=[])\n for r in response[\"dnsrecords\"]:\n dr = DNSRecord(id=int(r[\"id\"]),\n hostname=r[\"hostname\"],\n type=r[\"type\"],\n priority=int(r[\"priority\"]),\n destination=r[\"destination\"],\n deleterecord=r[\"deleterecord\"],\n state=r[\"state\"])\n\n rset.dnsrecords.append(dr)\n\n return rset", "def get_botnet_domains():\n\n fw = \"<HTTPS://YOUR_FORTIGATE_IP:YOUR_FORTIGATE_PORT>\"\n\n path = \"/api/v2/monitor/system/botnet-domains/hits/?access_token=\"\n\n token = \"<YOUR_API_KEY>\"\n\n content_filter = \"\"\n\n if 
content_filter != \"\":\n url = fw + path + token + content_filter\n else:\n url = fw + path + token\n\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n \n try:\n r = requests.get(url, verify=False).json()\n except Exception:\n print(\"Something went wrong. Is the url correct? Exiting...\")\n sys.exit()\n\n for key in r['results']:\n print()\n for k,v in key.items():\n print(\"{0:6} : {1}\".format(k.upper(), str(v)))", "def list_keystone_v3_domains(self):\n LOG_OBJ.debug(\"List the domains.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/domains\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating domain\")\n print (\"No response from Server while creating domain\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Listing domains Failed with status %s \"\n \"and error : %s\" % response.status, response.data)\n print (\" Listing domains Failed with status %s and error : %s\" %\n response.status, response.data)\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Domains list : %s \" % output)\n print (\"Domains list : %s \" % output)\n return output['domains']", "def domain_list_all(self):\n page = 1\n on_page = 100\n ret = []\n while True:\n r = self.domain_list(page=page, on_page=on_page)\n ret += r['domains']\n if len(ret) >= r['total']:\n break\n page += 1\n return ret", "def print_sodoku(sodoku_domains):\n for y in range(9):\n print(*[sodoku_domains[(x,y)] for x in range(9)],sep=\",\")", "def retrieve_resource_records(self):\n log('Retrieving records for {}'.format(self.domain))\n current_records = self._api_connection('dnsListRecords')\n for current_resource_record in current_records.iter('resource_record'):\n self.current_records.append(\n dict(\n (resource_record.tag, resource_record.text)\n for resource_record\n in current_resource_record.iter()\n )\n )\n log('{} records retrieved for {}'.format(len(self.current_records), self.domain))\n log(self.current_records)", "def create_dns_dictionary(self, path_tracefile):\n responses = self.get_dns_responses(path_tracefile)\n dns_dict = dict()\n for response in responses:\n for x in range(response[DNS].ancount): # answer count, how many IP adresses are returned for the query\n try: # answer count could also include 'DNS SRV Resource Record' which does not have a 'rrname' attribute so ancount is wrong if there is such a record -> TODO get amount of DNSRR instead of using ancount\n domain = getattr(response[DNSRR][x], 'rrname').decode(\"utf-8\") # domain (this is returned in bytes so decode)\n ip = getattr(response[DNSRR][x], 'rdata') # IP adres of the domain, TODO make this work for multiple ip adresses for one domain (Test with [0] at end)\n dns_dict[ip] = domain[:-1] #remove last char '.' 
\n except:\n continue\n return dns_dict", "def list_all():\n\n return (_conn.listDefinedDomains() +\n [_conn.lookupByID(id).name() for id in _conn.listDomainsID()])", "def domainlist_reversewhois(self, response):\n data = response.json()\n for domain in data['response']['domains']:\n yield(domain.lower())", "def getlist(self):\n self.__domainlist.sort()\n\n outstr = \"{ \"\n for index, domain in enumerate(self.__domainlist):\n outstr += domain + \" \"\n if (index % 50 == 0) and index > 0:\n outstr += \"}\\n{ \"\n\n outstr += \"}\"\n\n return outstr", "def domainlist_reversens(self, response):\n data = response.json()\n for domain in itertools.chain(data['response']['primary_domains'], data['response']['primary_domains']):\n yield(domain.lower())", "def info(self):\n\n return self.call(method='getDomain', args=[self.domainname])", "def _display_dns_results(self):\n if self.check_valid_result_data(\"dns_results\", silent=True):\n nb_markdown(f\"DNS events related to {self.url}\", \"bold\")\n display(self._last_result.dns_results)\n else:\n nb_markdown(f\"No DNS resolutions found for {self.url}\")", "def domainlist_reverseip(self, response):\n data = response.json()\n for ip in data['response']['ip_addresses']:\n for domain in ip['domain_names']:\n yield(domain.lower())", "def fastlydomain(args):\n pprint(api.domain(service_id, args[0], args[1]).attrs)", "def getSDDCDNS_Zones(**kwargs):\n sessiontoken = kwargs['sessiontoken']\n proxy = kwargs['proxy']\n json_response = get_sddc_dns_zones_json(proxy,sessiontoken)\n sddc_dns = json_response['results']\n table = PrettyTable(['ID', 'Name','DNS Domain Names','upstream_servers'])\n for i in sddc_dns:\n table.add_row([i['id'], i['display_name'], i['dns_domain_names'], i['upstream_servers']])\n # return table\n print(table)", "def get_domains(self):\n\n response = self.call(method='getDomains')\n domains = []\n for d in response:\n domain = self.domain(domain=d['domain'])\n domains.append(domain)\n return domains" ]
[ "0.70802027", "0.6874424", "0.67829025", "0.6670683", "0.6497714", "0.64597666", "0.6447752", "0.6308665", "0.62406534", "0.6203262", "0.619593", "0.6123511", "0.60674375", "0.6036058", "0.6034905", "0.6027634", "0.59853095", "0.5983278", "0.59728104", "0.5970376", "0.5966782", "0.5955435", "0.59287465", "0.5914915", "0.5912606", "0.59061444", "0.58951664", "0.58683264", "0.58351517", "0.58340347" ]
0.8448762
0
Returns a request handler class that redirects to supplied `url`
def redirect_handler_factory(): class RedirectHandler(SimpleHTTPServer.SimpleHTTPRequestHandler): def do_GET(self): self.send_response(301) domain = self.headers['host'] if ':' in domain: domain = domain.split(':')[0] self.send_header('Location', "https://" + domain + self.path) self.end_headers() return RedirectHandler
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def redirect_handler_factory(url):\n class RedirectHandler(http.server.SimpleHTTPRequestHandler):\n def do_GET(self):\n self.send_response(302)\n self.send_header('Location', url)\n self.end_headers()\n\n return RedirectHandler", "def redirect(url):", "def __call__(self, cls):\n handler = type('handler', (Handler,), dict(cls.__dict__))\n return handler(self._url_pattern)", "def redirect(self, url):\n raise RequestRedirect(url)", "def __init__(self, url, **kwargs):\n super(Redirect, self).__init__(**kwargs)\n self.value = url", "def urlfor( request, *args, **kwargs ):", "def _make_ssh_forward_handler_class(self, remote_address_):\n class Handler(_ForwardHandler):\n remote_address = remote_address_\n ssh_transport = self._transport\n logger = self.logger\n return Handler", "def _redirect(self, url):\n logger.debug('Redirecting to URL %s', url)\n segments = urllib.parse.urlparse(url)\n\n host = segments.netloc\n if host != self._host:\n self.new_connection(host)\n\n relurl = urllib.parse.urlunparse(('', '') + segments[2:])\n try:\n self._raw_get(relurl)\n except http.client.HTTPException as e:\n logger.debug('Got exception: %s.', e)\n raise DDGConnectionError(\"Failed to get '%s'.\" % url)", "def get_redirect_handler_for_site(site, request):\n\n view = queryMultiAdapter((site, request), name=\"redirect_handler\")\n if view:\n return view\n\n # Check if we have a redirect handler script in the site root\n if \"redirect_handler\" in site:\n return site[\"redirect_handler\"]\n\n return None", "def redirect(url, code=302):\n exc = status_map[code]\n raise exc(location=url).exception", "def get(url):\n url = add_slash(url)\n\n def _(func):\n re_url = re.compile(\"^%s$\" % url)\n REQUEST_MAPPINGS['GET'].append((re_url, url, func))\n return func\n return _", "def requestredirect(function):\n def wrapped(self, id, *args):\n try:\n server = self.db.requests.get_imaging_server(id)\n except exceptions.NotFound:\n raise web.notfound()\n if server != config.get('server', 'fqdn'):\n raise web.found(\"http://%s%s\" % (server, web.ctx.path))\n return function(self, id, *args)\n return wrapped", "def post(url):\n url = add_slash(url)\n\n def _(func):\n re_url = re.compile(\"^%s$\" % url)\n REQUEST_MAPPINGS['POST'].append((re_url, url, func))\n return func\n return _", "def __init__(self, url, proxy=None, **kwargs):\n self.proxy = proxy\n self.query_params = urllib.parse.urlencode(kwargs)\n self.url = url if not self.query_params else f\"{url}?{self.query_params}\"\n logger.info(\"UrllibHandler initialized: url=%s, proxy=%s\", self.url, self.proxy)", "def get_request_handler(self):\n if not hasattr(self, '_oauth_handler'):\n handler_class = self.get_handler_class()\n server = self.get_server()\n self._oauth_handler = handler_class(server)\n return self._oauth_handler", "def getRedirectedURL(url):\n try:\n cj = cookielib.CookieJar()\n cp = urllib2.HTTPCookieProcessor(cj)\n opener = urllib2.build_opener(cp)\n opener.addheaders = [('User-agent', 'Mozilla/5.0')]\n url = url.replace('^https', 'http')\n request = urllib2.Request(url)\n u = opener.open(request)\n redirected_url = u.geturl()\n return redirected_url\n except urllib2.URLError:\n print \"URLError: %s\" % url\n return None\n except (urllib2.HTTPError, BadStatusLine, InvalidURL):\n print \"HTTPError: %s\" % (url)\n return None\n except (socket.timeout):\n print \"Timeout: %s\" % url\n return None\n except Exception as e:\n print \"Error: %s\" % url\n print e\n return None", "def urlopen(url):\n logging.info(\"urlopen %s\", url)\n \n try:\n return 
_urlopen(url)\n except ProxyError, e:\n logging.error(\"%s - %s\", str(e), url)\n response = ProxyHTTPResponse(url, None, method=\"GET\")\n response.error_bad_gateway()\n return response", "def handler(req):\n name = gethandlername(req.uri)\n if name == \"dispatcher\":\n raise404(\"Can't display the dispatcher\")\n handlerfunc = gethandlerfunc(name)\n return handlerfunc(req)", "def _find_url_handler(self, req):\n # First try - lookup in explicit (non parameterized URLs)\n if req.path in self.explicit_url_map:\n return self.explicit_url_map[req.path]\n # Second try - strip last path segment and lookup in another map\n idx = req.path.rfind(b'/') + 1\n path2 = req.path[:idx]\n if len(path2) > 0 and path2 in self.parameterized_url_map:\n # Save parameter into request\n req._param = req.path[idx:].decode()\n return self.parameterized_url_map[path2]\n\n if self.catch_all_handler:\n return self.catch_all_handler\n\n # No handler found\n return (None, None)", "def make_request(url):\n logger = logging.getLogger(\"http_request\")\n\n ff_agent = \"Mozilla/5.0 (Windows NT 6.1; WOW64; rv:35.0)\" + \\\n \"Gecko/20100101 Firefox/35.0\"\n req = Request(url, headers={\"User-Agent\": ff_agent})\n\n logger.info(\"Sending request to %s\", url)\n try:\n response = urlopen(req)\n except URLError as e:\n if hasattr(e, \"reason\"):\n logger.error(\"Failed to reach a server. URL: %s Reason: %s\",\n (url, e.reason))\n elif hasattr(e, \"code\"):\n logger.error(\"The server couldn't fulfill the request. \" +\n \"URL: %s Error code: %s\", url, e.code)\n else:\n logger.error(\"Unknown URLError while making request. \" +\n \"URL: %s Error: %s\", url, str(e))\n return\n except Exception as e:\n logger.error(\"Unknown Exception while making request. \" +\n \"URL: %s Exception: %s\", url, str(e))\n return\n\n response_url = response.geturl()\n if response_url != url:\n logging.warn(\"Response url does not match requested url. \" +\n \"Request: %s Response %s\", url, response_url)\n\n try:\n page = response.read()\n except Exception as e:\n logger.error(\"Failed to read page from response. URL: %s Error: %s\",\n (url, str(e)))\n return\n\n logger.info(\"Successfully fetched page %s\", url)\n return page", "def redirect_request(self, req, fp, code, msg, headers, newurl):\n m = req.get_method()\n if (code in (301, 302, 303, 307) and m in (\"GET\", \"HEAD\")\n or code in (301, 302, 303) and m == \"POST\"):\n # Strictly (according to RFC 2616), 301 or 302 in response\n # to a POST MUST NOT cause a redirection without confirmation\n # from the user (of urllib2, in this case). In practice,\n # essentially all clients do redirect in this case, so we\n # do the same.\n # be conciliant with URIs containing a space\n newurl = newurl.replace(' ', '%20')\n newheaders = dict((k,v) for k,v in req.headers.items()\n if k.lower() not in (\"content-length\", \"content-type\")\n )\n return Request2(newurl,\n headers=newheaders,\n origin_req_host=req.get_origin_req_host(),\n unverifiable=True,\n method=\"GET\" if code == 303 else m)\n else:\n raise urllib2.HTTPError(req.get_full_url(), code, msg, headers, fp)", "def dispatcher(handlers):\n\n # Transforms wsgi_env, start_response args into request\n @wrappers.Request.application\n def dispatch(request):\n \"\"\"Handle one request.\"\"\"\n for url_re, app in handlers:\n matcher = re.match(url_re, request.path)\n if matcher and matcher.end() == len(request.path):\n if app is not None:\n # Send a response via the app specified in the handler.\n return app\n else:\n # The import must have failed. 
This will have been logged\n # at import time. Send a 500 error response.\n return response_for_error(httplib.INTERNAL_SERVER_ERROR)\n logging.error('No handler found for %s', request.path)\n return response_for_error(httplib.NOT_FOUND)\n\n return dispatch", "def __call__(self, environ, start_response):\n path_info = environ['PATH_INFO']\n for key, redirect in self.redirects.items():\n if self.match(key, path_info):\n environ['PATH_INFO'] = redirect\n return self(environ, start_response)\n else:\n path, cut, prefix = self.first_path_segment(path_info)\n root = path[:cut]\n rest = path[cut:]\n if root in self.routes:\n environ['PATH_INFO'] = rest\n #XXX shouldn't we += to SCRIPT_NAME?\n environ['SCRIPT_NAME'] = prefix + root\n app = self.routes[root]\n else:\n app = webob.exc.HTTPNotFound()\n return app(environ, start_response)", "def get_redirect(self, url):\n self._load_redirects()\n if url not in self.redirects:\n return url\n return self.redirects[url]", "def _request_handler_factory(custom_param):\n\n def factory(*args):\n return _RequestHandler(custom_param, *args)\n\n return factory", "def redirect(url, status=None):\n raise cherrypy.HTTPRedirect(url, status)", "def processUrl(self, url: str) -> dict:\n site = self.sf.urlFQDN(url)\n cookies = None\n\n # Filter out certain file types (if user chooses to)\n if list(filter(lambda ext: url.lower().split('?')[0].endswith('.' + ext.lower()), self.opts['filterfiles'])):\n # self.debug(f\"Ignoring URL with filtered file extension: {link}\")\n return None\n\n if site in self.siteCookies:\n self.debug(f\"Restoring cookies for {site}: {self.siteCookies[site]}\")\n cookies = self.siteCookies[site]\n\n # Fetch the contents of the supplied URL\n fetched = self.sf.fetchUrl(\n url,\n cookies=cookies,\n timeout=self.opts['_fetchtimeout'],\n useragent=self.opts['_useragent'],\n sizeLimit=10000000,\n verify=False\n )\n self.fetchedPages[url] = True\n\n if not fetched:\n return None\n\n # Track cookies a site has sent, then send the back in subsquent requests\n if self.opts['usecookies'] and fetched['headers'] is not None:\n if fetched['headers'].get('Set-Cookie'):\n self.siteCookies[site] = fetched['headers'].get('Set-Cookie')\n self.debug(f\"Saving cookies for {site}: {self.siteCookies[site]}\")\n\n if url not in self.urlEvents:\n # TODO: be more descriptive\n self.error(\"Something strange happened - shouldn't get here: url not in self.urlEvents\")\n self.urlEvents[url] = None\n\n # Notify modules about the content obtained\n self.contentNotify(url, fetched, self.urlEvents[url])\n\n real_url = fetched['realurl']\n if real_url and real_url != url:\n # self.debug(f\"Redirect of {url} to {real_url}\")\n # Store the content for the redirect so that it isn't fetched again\n self.fetchedPages[real_url] = True\n # Notify modules about the new link\n self.urlEvents[real_url] = self.linkNotify(real_url, self.urlEvents[url])\n url = real_url # override the URL if we had a redirect\n\n data = fetched['content']\n\n if not data:\n return None\n\n if isinstance(data, bytes):\n data = data.decode('utf-8', errors='replace')\n\n # Extract links from the content\n links = SpiderFootHelpers.extractLinksFromHtml(\n url,\n data,\n self.getTarget().getNames()\n )\n\n if not links:\n self.debug(f\"No links found at {url}\")\n return None\n\n # Notify modules about the links found\n # Aside from the first URL, this will be the first time a new\n # URL is spotted.\n for link in links:\n if not self.opts['reportduplicates']:\n if link in self.urlEvents:\n continue\n # Supply 
the SpiderFootEvent of the parent URL as the parent\n self.urlEvents[link] = self.linkNotify(link, self.urlEvents[url])\n\n self.debug(f\"Links found from parsing: {links.keys()}\")\n return links", "def json_redirect(url):\n return json_response(isRedirect=True, redirectTo=url)", "def redirect(url, code=None):\r\n if code is None:\r\n code = 303 if request.get('SERVER_PROTOCOL') == \"HTTP/1.1\" else 302\r\n location = urljoin(request.url, url)\r\n raise HTTPResponse(\"\", status=code, header=dict(Location=location))", "def follow_redirects(self, url):\n try:\n return requests.get(url).url\n except requests.RequestException:\n return None" ]
[ "0.8006581", "0.63602716", "0.60251427", "0.5762764", "0.562808", "0.5593964", "0.5454222", "0.5417794", "0.54076666", "0.5355651", "0.5345712", "0.53292465", "0.53158814", "0.53054017", "0.526144", "0.5258285", "0.52532136", "0.5251986", "0.52408147", "0.5233149", "0.5226429", "0.51969796", "0.5193553", "0.51904196", "0.51902217", "0.5186876", "0.5181511", "0.51453596", "0.5138872", "0.5126173" ]
0.6883462
1
loop and copy console>serial until config.exit_char character is found. when config.menu_char is found, interpret the next key locally.
def writer(self): menu_active = False try: while self.alive: try: char = self.console.getkey() except KeyboardInterrupt: char = '\x03' if menu_active: # Menu character again/exit char -> send itself if char in self.config.menu_char: self.serial.write(char) # send character elif char in self.config.exit_char: self.stop() break # exit app elif char in 'hH?': # h, H, ? -> Show help sys.stderr.write(self.get_help_text()) elif char in self.config.photo_char: ENV.send_image_f = "Asked by console" else: sys.stderr.write('--- unknown menu character %s ---\n' % char) menu_active = False elif char in self.config.menu_char: # next char will be for menu menu_active = True elif char == '\n' or ord(char) == 10: sys.stderr.write('\n') else: self.serial.write(char) # send character except: self.alive = False raise
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getKey(self):\n while not rospy.is_shutdown():\n tty.setraw(sys.stdin.fileno())\n select.select([sys.stdin], [], [], 0)\n self.key = sys.stdin.read(1)\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.settings)\n time.sleep(.05)", "def run(self):\n global key\n getch = _GetchUnix()\n key = getch()\n while key != \"e\":\n key = getch()\n #time.sleep(0.1)", "def ReadKeys(self):\n\n reg = re.compile(r\"\\w|\\s\")\n chars = \"\"\n while True:\n key = getch()\n keynum = ord(key)\n\n if keynum == 27: #escape\n self.shouldExit = True\n return \"\"\n\n if keynum == 13: #enter\n stdout.write(\"\\n\")\n break\n\n if keynum == 8: #backspace\n chars = chars[:-1]\n stdout.write(key)\n stdout.write(\" \")\n stdout.write(key)\n continue\n\n if reg.match(key): \n chars += key\n stdout.write(key)\n\n return chars", "def enter_repl(self):\n text_input = ''\n while True:\n text_input = input('>>')\n if text_input == 'exit':\n break\n #An alias for querying an instrument error string\n elif text_input == 'err?':\n self.write_to_serial(':SYST:ERR?')\n print(self.read_from_serial())\n else:\n self.write_to_serial(text_input)\n print(self.read_from_serial())", "def cmdKeyboard(self, dev):\n # Slaap één-tiende van een seconde om te voorkomen dat de toetsaanslag <enter>\n # wordt gepakt als het wordt uitgevoerd in een terminal\n time.sleep(0.1)\n\n self.hitsKeyboards[dev] = False\n f = open(self.inputPath + dev, 'rb')\n f.flush()\n while self.live:\n # Lees de toetsaanslag --> Pak de 42ste byte\n self.hitsKeyboards[dev] = f.read(144)[42]\n time.sleep(0.1)", "def get(self):\n fd = stdin.fileno()\n default = termios.tcgetattr(fd)\n\n tty.setraw(stdin.fileno()) # set terminal to raw input mode to get bytes\n\n while True: # read in until a key is pressed\n char = ord(stdin.read(1))\n if char != \"\":\n break\n # logic for keyboard interrupt\n if char == 0x03: # is the input the interrupt code?\n termios.tcsetattr(fd, termios.TCSADRAIN, default)\n raise KeyboardInterrupt\n # logic for when the user hits enter\n elif char == 0x0D or char == 0x0A: # enter is one of these depending on system\n marked = self.mask[self.cursor_location]\n\n # toggle the corresponding spot in the selection mask\n self.mask[self.cursor_location] = not self.mask[self.cursor_location]\n\n current = self.options[self.cursor_location]\n if marked: # if the item was previously selected\n self.selected = list(\n filter(lambda item: item != current, self.selected)\n ) # remove item from selecteed\n else: # if not\n self.selected.append(current) # add it to the list of selected options\n # logic for arrow keys\n # these keypresses are three bytes long\n # the first byte is an escape character\n elif char == 0x1B: # check for escape character\n if ord(stdin.read(1)) == 0x5B: # check for next byte, same for up and down\n last = ord(stdin.read(1))\n if last == 0x42: # up arrow\n # adjust the cursor position, wrapping if reached the end\n self.cursor_location = (self.cursor_location + 1) % len(\n self.options\n )\n elif last == 0x41: # down arrow\n self.cursor_location = (self.cursor_location - 1) % len(\n self.options\n )\n termios.tcsetattr(\n fd, termios.TCSADRAIN, default\n ) # reset the terminal out of raw mode", "def inputloop():\n while True:\n for char in raw_input().decode('utf-8'):\n print script(char)", "def get_key():\n\tinput_key: str = \"\"\n\ttry:\n\t\twhile not False:\n\t\t\twith Raw(sys.stdin):\n\t\t\t\tif not select([sys.stdin], [], [], 0.1)[0]: #* Wait 100ms for input on stdin then restart loop to check for stop 
flag\n\t\t\t\t\tcontinue\n\t\t\t\tinput_key += sys.stdin.read(1) #* Read 1 key safely with blocking on\n\t\t\t\tif input_key == \"\\033\": #* If first character is a escape sequence keep reading\n\t\t\t\t\twith Nonblocking(sys.stdin): #* Set non blocking to prevent read stall\n\t\t\t\t\t\tinput_key += sys.stdin.read(20)\n\t\t\t\t\t\tif input_key.startswith(\"\\033[<\"):\n\t\t\t\t\t\t\t_ = sys.stdin.read(1000)\n\t\t\t\tprint(\"INPUT: \"+input_key.replace(\"\\033\",\"<ESC>\"))\n\t\t\t\tif input_key == \"\\033\" or input_key == \"q\": #* Key is \"escape\" key if only containing \\033\n\t\t\t\t\tbreak\n\t\t\t\telif input_key.startswith((\"\\033[<0;\", \"\\033[<35;\", \"\\033[<64;\", \"\\033[<65;\")): #* Detected mouse event\n\t\t\t\t\ttry:\n\t\t\t\t\t\tprint((int(input_key.split(\";\")[1]), int(input_key.split(\";\")[2].rstrip(\"mM\"))))\n\t\t\t\t\texcept:\n\t\t\t\t\t\tpass\n\t\t\t\t\telse:\n\t\t\t\t\t\tif input_key.startswith(\"\\033[<35;\"):\n\t\t\t\t\t\t\tprint(\"mouse Move\") #* Detected mouse move in mouse direct mode\n\t\t\t\t\t\telif input_key.startswith(\"\\033[<64;\"):\n\t\t\t\t\t\t\tprint(\"mouse Scroll UP\") #* Detected mouse scroll up\n\t\t\t\t\t\telif input_key.startswith(\"\\033[<65;\"):\n\t\t\t\t\t\t\tprint(\"mouse Scroll DOWN\") #* Detected mouse scroll down\n\t\t\t\t\t\telif input_key.startswith(\"\\033[<0;\") and input_key.endswith(\"m\"):\n\t\t\t\t\t\t\tprint(\"mouse Click Release\") #* Detected mouse click release\n\t\t\t\tinput_key = \"\"\n\texcept Exception as e:\n\t\tprint(f'EXCEPTION: Input thread failed with exception: {e}')", "def run_terminal():\n playing = True\n while playing:\n print(\"$\", end='')\n command = input()\n list_of_inputs = command.split(\" \")\n if list_of_inputs[0] == \"exit\":\n playing = False\n break\n handle_command(list_of_inputs)", "def move_debug(self, environment):\n\n ch2 = sys.stdin.read(1)\n\n if ch2 == \"w\":\n # the up arrow key was pressed\n print(\"up key pressed\")\n\n elif ch2 == \"s\":\n # the down arrow key was pressed\n print(\"down key pressed\")\n\n elif ch2 == \"a\":\n # the left arrow key was pressed\n print(\"left key pressed\")\n\n elif ch2 == \"d\":\n # the right arrow key was pressed\n print(\"right key pressed\")", "def _configure_keyboard(self):\n fd = sys.stdin.fileno()\n self.original_kbd_settings = termios.tcgetattr(fd)\n new = termios.tcgetattr(fd)\n new[3] = new[3] & ~termios.ECHO # lflags\n new[3] = new[3] & ~termios.ICANON # lflags\n new[6][6] = '\\000' # Set VMIN to zero for lookahead only\n termios.tcsetattr(fd, termios.TCSADRAIN, new)", "def console():\r\n while True:\r\n interpret_command(input(\"POM> \"))", "def onKeyPress(self):\n ch = read(fd, 4)\n if ch == '\\033': # escape\n self.pause()\n elif '\\033' in ch:\n return\n elif '\\t' in ch: # tab\n return\n elif len(self.user_input) >= 80: # too long\n self.user_input[:80]\n return\n elif ch == '\\r': # return\n if self.user_input == \"\":\n return\n command = command_list.match(self.user_input)\n if not command:\n pass\n elif command.group(1):\n self._save(0)\n elif command.group(2):\n self._save()\n elif command.group(3):\n self._save(command.group(4))\n link = self.links.match(self.user_input.lower())\n if link:\n self.reset(link.group(0))\n self.user_input = \"\"\n self.locked += 1\n print '\\033[0m'\n print_loc(' '*80, self.y+5, self.x+2)\n #print_loc(' '*80, self.y+6, 0)\n self.locked -= 1\n elif ch == '\\x7f': # backspace\n if self.user_input == \"\":\n return\n self.user_input = self.user_input[:-1]\n elif ch == ' ': # space\n if self.user_input == 
\"\":\n return\n elif self.user_input[-1] == ' ':\n return\n self.user_input += ' '\n else: # all else\n self.user_input += ch\n self.locked += 1\n # Highlight valid user input\n if self.links.match(self.user_input.lower()):\n print '\\033[0;96;4m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n elif command_list.match(self.user_input):\n print '\\033[0;1;92m'\n print_loc(self.user_input+'\\033[0;1m < \\033[0m ', self.y + 5, self.x)\n else:\n print '\\033[0m'\n # Display new user input line\n print_loc(self.user_input+'\\033[0;7m \\033[0m ', self.y + 5, self.x)\n self.locked -= 1", "def getKey(self):\n tty.setraw(sys.stdin.fileno())\n select.select([sys.stdin], [], [], 0)\n self.key = sys.stdin.read(1)\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, self.settings)", "def send_enter():\n sys.stdout.write('\\x0D') # send carriage return\n sys.stdout.flush()", "def sync(self):\n available = self.count\n if available > 0:\n available = available + 2\n buf = self.read_keypad(available)\n for raw in buf:\n evt = KeyEvent(_seesaw_key((raw >> 2) & 0x3F), raw & 0x3)\n if (\n evt.number < _NEO_TRELLIS_NUM_KEYS\n and self.callbacks[evt.number] is not None\n ):\n self.callbacks[evt.number](evt)", "def interact(self):\n if not self.connected(): return\n\n try:\n if sys.platform == 'win32':\n import msvcrt\n else:\n import tty, termios\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n tty.setraw(fd)\n\n self.start_listener()\n self.start_anti_idle_timer()\n\n sys.stdout.write(self.prompt)\n\n pre_ch = b''\n while True:\n if sys.platform == 'win32':\n ch = msvcrt.getch()\n if ch == b'\\xe0':\n ch = b'\\x1b'\n if pre_ch == b'\\x1b':\n if ch == b'K': ch = b'[D' # left arrow\n elif ch == b'M': ch = b'[C' # right arrow\n elif ch == b'H': ch = b'[A' # up arrow\n elif ch == b'P': ch = b'[B' # down arrow\n else:\n ch = sys.stdin.read(1)\n if not ch:\n break\n if not self.connected():\n break\n\n self.write(ch)\n pre_ch = ch\n\n if not self.connected():\n break\n finally:\n if sys.platform != 'win32':\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n self.cancel_anti_idle_timer()", "async def __bufferedReader():\n while True:\n # Get char and then append to prevent a race condition caused by the async await\n charIn = await __terminalState.osSupport.getInputChar()\n\n wasHandled = False\n for key, handlers in __terminalState.inputHandlers.items():\n if key is None or charIn in key:\n for handler in handlers:\n asyncio.get_event_loop().call_soon(handler, charIn)\n wasHandled = True\n\n if not wasHandled:\n __terminalState.inputBuffer += charIn", "def until_not_multi(chars) -> str:\n import sys\n chars = list(chars)\n y = \"\"\n sys.stdout.flush()\n while True:\n i = read_single_keypress()\n _ = sys.stdout.write(i)\n sys.stdout.flush()\n if i not in chars:\n break\n y += i\n return y", "def main(self):\n while self.leave_main_menu:\n print(fr.FR[4], fr.FR[5], fr.FR[6], fr.FR[7])\n self.choice_menu = input(fr.FR[8])\n self.main_menu_input()", "def init_keystrokes(self):\n import x84.bbs.session\n term = x84.bbs.session.getterminal()\n self.keyset['home'].append(term.KEY_HOME)\n self.keyset['end'].append(term.KEY_END)\n self.keyset['pgup'].append(term.KEY_PGUP)\n self.keyset['pgdown'].append(term.KEY_PGDOWN)\n self.keyset['up'].append(term.KEY_UP)\n self.keyset['down'].append(term.KEY_DOWN)\n self.keyset['down'].append(term.KEY_ENTER)\n self.keyset['exit'].append(term.KEY_ESCAPE)", "def enter_raw_repl(self):\n\n debug_indent(\"enter_raw_repl\")\n\n 
time.sleep(0.5) # allow some time for board to reset\n debug(r'self.con.write \"\\r\\x03\\x03\" (Ctrl-C twice)')\n self.con.write(b\"\\r\\x03\\x03\") # ctrl-C twice: interrupt any running program\n\n # flush input (without relying on serial.flushInput())\n n = self.con.inWaiting()\n while n > 0:\n self.con.read(n)\n n = self.con.inWaiting()\n\n if self.con.survives_soft_reset():\n debug(r'self.con.write \"\\r\\x01\" (enter raw REPL)')\n self.con.write(b\"\\r\\x01\") # ctrl-A: enter raw REPL\n data = self.read_until(1, b\"raw REPL; CTRL-B to exit\\r\\n>\", timeout=10)\n\n if not data.endswith(b\"raw REPL; CTRL-B to exit\\r\\n>\"):\n print(data)\n debug_unindent()\n raise PyboardError(\"could not enter raw repl 1\")\n\n debug(r'self.con.write \"\\x04\" (soft reset)')\n self.con.write(b\"\\x04\") # ctrl-D: soft reset\n data = self.read_until(1, b\"soft reboot\\r\\n\", timeout=10)\n if not data.endswith(b\"soft reboot\\r\\n\"):\n print(data)\n debug_unindent()\n raise PyboardError(\"could not enter raw repl 2\")\n\n # By splitting this into 2 reads, it allows boot.py to print stuff,\n # which will show up after the soft reboot and before the raw REPL.\n data = self.read_until(1, b\"raw REPL; CTRL-B to exit\\r\\n\", timeout=10)\n if not data.endswith(b\"raw REPL; CTRL-B to exit\\r\\n\"):\n print(data)\n debug_unindent()\n raise PyboardError(\"could not enter raw repl 3\")\n\n else:\n\n debug(r'self.con.write \"\\r\\x01\" (enter raw REPL)')\n self.con.write(b\"\\r\\x01\") # ctrl-A: enter raw REPL\n data = self.read_until(0, b\"raw REPL; CTRL-B to exit\\r\\n\", timeout=10)\n\n if not data.endswith(b\"raw REPL; CTRL-B to exit\\r\\n\"):\n print(data)\n debug_unindent()\n raise PyboardError(\"could not enter raw repl 4\")\n debug_unindent()", "def _on_key_down(self, event, skip=True):\n # FIXME: This method needs to be broken down in smaller ones.\n current_line_num = self.GetCurrentLine()\n key_code = event.GetKeyCode()\n if key_code in (ord('c'), ord('C')) and event.ControlDown():\n # Capture Control-C\n if self._input_state == 'subprocess':\n if self.debug:\n print >>sys.__stderr__, 'Killing running process'\n if hasattr(self._running_process, 'process'):\n self._running_process.process.kill()\n elif self._input_state == 'buffering':\n if self.debug:\n print >>sys.__stderr__, 'Raising KeyboardInterrupt'\n raise KeyboardInterrupt\n # XXX: We need to make really sure we\n # get back to a prompt.\n elif self._input_state == 'subprocess' and (\n ( key_code <256 and not event.ControlDown() )\n or\n ( key_code in (ord('d'), ord('D')) and\n event.ControlDown())):\n # We are running a process, we redirect keys.\n ConsoleWidget._on_key_down(self, event, skip=skip)\n char = chr(key_code)\n # Deal with some inconsistency in wx keycodes:\n if char == '\\r':\n char = '\\n'\n elif not event.ShiftDown():\n char = char.lower()\n if event.ControlDown() and key_code in (ord('d'), ord('D')):\n char = '\\04'\n self._running_process.process.stdin.write(char)\n self._running_process.process.stdin.flush()\n elif key_code in (ord('('), 57, 53):\n # Calltips\n event.Skip()\n self.do_calltip()\n elif self.AutoCompActive() and not key_code == ord('\\t'):\n event.Skip()\n if key_code in (wx.WXK_BACK, wx.WXK_DELETE):\n wx.CallAfter(self._popup_completion, create=True)\n elif not key_code in (wx.WXK_UP, wx.WXK_DOWN, wx.WXK_LEFT,\n wx.WXK_RIGHT, wx.WXK_ESCAPE):\n wx.CallAfter(self._popup_completion)\n else:\n # Up history\n if key_code == wx.WXK_UP and (\n event.ControlDown() or\n current_line_num == self.current_prompt_line\n 
):\n new_buffer = self.get_history_previous(\n self.input_buffer)\n if new_buffer is not None:\n self.input_buffer = new_buffer\n if self.GetCurrentLine() > self.current_prompt_line:\n # Go to first line, for seemless history up.\n self.GotoPos(self.current_prompt_pos)\n # Down history\n elif key_code == wx.WXK_DOWN and (\n event.ControlDown() or\n current_line_num == self.LineCount -1\n ):\n new_buffer = self.get_history_next()\n if new_buffer is not None:\n self.input_buffer = new_buffer\n # Tab-completion\n elif key_code == ord('\\t'):\n current_line, current_line_num = self.CurLine\n if not re.match(r'^%s\\s*$' % self.continuation_prompt(),\n current_line):\n self.complete_current_input()\n if self.AutoCompActive():\n wx.CallAfter(self._popup_completion, create=True)\n else:\n event.Skip()\n elif key_code == wx.WXK_BACK:\n # If characters where erased, check if we have to\n # remove a line.\n # XXX: What about DEL?\n # FIXME: This logics should be in ConsoleWidget, as it is\n # independant of IPython\n current_line, _ = self.CurLine\n current_pos = self.GetCurrentPos()\n current_line_num = self.LineFromPosition(current_pos)\n current_col = self.GetColumn(current_pos)\n len_prompt = len(self.continuation_prompt())\n if ( current_line.startswith(self.continuation_prompt())\n and current_col == len_prompt):\n new_lines = []\n for line_num, line in enumerate(\n self.input_buffer.split('\\n')):\n if (line_num + self.current_prompt_line ==\n current_line_num):\n new_lines.append(line[len_prompt:])\n else:\n new_lines.append('\\n'+line)\n # The first character is '\\n', due to the above\n # code:\n self.input_buffer = ''.join(new_lines)[1:]\n self.GotoPos(current_pos - 1 - len_prompt)\n else:\n ConsoleWidget._on_key_down(self, event, skip=skip)\n else:\n ConsoleWidget._on_key_down(self, event, skip=skip)", "def read_next_command(self):\n colors = self.colors\n message = 'ddgr (? 
for help)'\n prompt = (colors.prompt + message + colors.reset + ' ') if colors else (message + ': ')\n enter_count = 0\n while True:\n try:\n cmd = input(prompt)\n except EOFError:\n sys.exit(0)\n\n if not cmd:\n enter_count += 1\n if enter_count == 2:\n # Double <enter>\n sys.exit(0)\n else:\n enter_count = 0\n\n cmd = ' '.join(cmd.split())\n if cmd:\n self.cmd = cmd\n break", "def process_key(key):\n print(chr(key))", "def onKey(self,event):\n \n ch = event.char.lower()\n \n if ch in ('\\n','\\r'):\n ch = self.defaultButton[0].lower()\n \n if ch == self.yesMessage[0].lower():\n self.yesButton()\n elif ch == self.noMessage[0].lower():\n self.noButton()\n elif ch == 'c':\n self.cancelButton()\n \n return \"break\"", "def menu():\n menu = 'main'\n while 1:\n if menu == 'main':\n click.echo('Main menu:')\n click.echo(' d: debug menu')\n click.echo(' q: quit')\n char = click.getchar()\n if char == 'd':\n menu = 'debug'\n elif char == 'q':\n menu = 'quit'\n else:\n click.echo('Invalid input')\n elif menu == 'debug':\n click.echo('Debug menu')\n click.echo(' b: back')\n char = click.getchar()\n if char == 'b':\n menu = 'main'\n else:\n click.echo('Invalid input')\n elif menu == 'quit':\n return", "def __exit__(self, exc_type, exc_val, exc_tb):\n self.stdscr.keypad(False)\n self.stdscr.nodelay(False)\n curses.echo()\n curses.nocbreak()\n curses.endwin()", "def wait_key():\n result = None\n if os.name == 'nt':\n result = input(\"Press Enter to continue...\")\n else:\n import termios\n fd = sys.stdin.fileno()\n\n oldterm = termios.tcgetattr(fd)\n newattr = termios.tcgetattr(fd)\n newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO\n termios.tcsetattr(fd, termios.TCSANOW, newattr)\n\n try:\n result = sys.stdin.read(1)\n except IOError:\n pass\n finally:\n termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)\n\n return result", "def send_cmd(self):\n\n cmd = self.repl_input.get().encode()\n self.serial.write(cmd + b\"\\r\")\n self.repl_input.set(\"\")" ]
[ "0.60768646", "0.5757084", "0.57472295", "0.572881", "0.5679723", "0.5675239", "0.56368023", "0.5612974", "0.560551", "0.5580252", "0.55787677", "0.5541301", "0.55385447", "0.5523714", "0.54723734", "0.5423755", "0.54224914", "0.5421046", "0.54165447", "0.5408846", "0.5373981", "0.5367405", "0.5362362", "0.53611", "0.5317438", "0.5311899", "0.5284995", "0.52671164", "0.526561", "0.5253077" ]
0.66630965
0
Getting mri (most recent influence) Returns 0 if no influence exists
def _get_mri(journal): try: return Influence.objects.filter(journal__issn=journal.issn).order_by('-date_stamp')[0] except IndexError: return 0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_reward(self):\n if self.is_game_done:\n return self.price - 1\n else:\n return 0.0", "def get_reward(state, resolution, grid_x, grid_y):\n a,b = single_index_to_index(state, resolution)\n position = index_to_obs(a, b, grid_x, grid_y )[0]\n if position >= 0.5:\n return 0\n return -1", "def _compute_reward(self): \n reward = -1\n return reward", "def _get_reward(self, action):\n HIRE_COST = 1 # TODO 7/29/20 - Determine significance of this value\n\n # Lookup the state representation using the cur_state index. Then we\n # can get the candidate productivity score.\n obs = self.observation_function[self.cur_state]\n prod_score = obs[1]\n r = action*(prod_score - HIRE_COST)\n return r", "def intensity(self) -> int:", "def getReward(self):\n return self._mdp.R[self._prev_state,self._cur_state]", "def m(self, element):\n if element in self.focals:\n return self.focals[element]\n return 0", "def getIR3() -> int:\n pass", "def getIR1() -> int:\n pass", "def getIR2() -> int:\n pass", "def getReward(self, active_corr, simulator, p, active_goal):\n i_r = self.correlations[active_corr].i_reward\n # if i_r is None:\n # reward = self.simulator.getReward()\n # elif self.correlations[i_r].getCertainty() > self.threshold:\n if i_r is None:\n reward = simulator\n elif self.correlations[i_r].getCertainty(p, active_goal) > self.threshold:\n reward = 1\n else:\n reward = 0\n return reward", "def min_mireds(self):\n return 175", "def _get_reward(self):\n if self.status():\n return self.current_step/self.ep_length # the reward is proportional to the duration \n else:\n return 0", "def get_reward(self, done):\n reward = 0\n self.calc_pos_diff_ratio()\n reward = self.calc_base_reward_2(reward)\n\n return reward", "def getInteractionRate(self):\n m = mctal.MCTAL(self.name+'.m')\n t = m.tallies[4]\n # Returing the total\n return t.data[-1],t.errors[-1]", "def get_efermi(fn):\n try:\n f = open(fn)\n except:\n return 0\n line = f.readline()\n f.close()\n ef = float(line.split()[6])\n print('Calculated Fermi level: {0}'.format(ef))\n return ef", "def MFE_rel(self):\n try:\n return(self.MFE / self.price_open)\n except:\n return", "def _calculate_r0(net):\n\n r0 = 0\n for reaction in net.reactions:\n t = reaction.rate(net.species)\n r0 += t\n\n return r0", "def get_rzero(self):\n return self.get_resistance() * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def get_rzero(self):\n return self.get_resistance() * math.pow((self.ATMOCO2/self.PARA), (1./self.PARB))", "def last_fmeasure(self):\n return self.get_fvalue(self.last_position())", "def _get_reward(self, terminal):\n if not terminal:\n return 0\n\n folded_design, _ = fold(self.design.primary)\n hamming_distance = hamming(folded_design, self.target.dot_bracket)\n if 0 < hamming_distance < self._env_config.mutation_threshold:\n hamming_distance = self._local_improvement(folded_design)\n\n normalized_hamming_distance = hamming_distance / len(self.target)\n\n # For hparam optimization\n episode_info = EpisodeInfo(\n target_id=self.target.id,\n time=time.time(),\n normalized_hamming_distance=normalized_hamming_distance,\n )\n self.episodes_info.append(episode_info)\n\n return (1 - normalized_hamming_distance) ** self._env_config.reward_exponent", "def getTotalReward(self):\n return self.cumreward", "def getTotalReward(self):\n return self.cumreward", "def get_rmax(self):\n return self.rmax", "def get_reward(self) -> float:\r\n field = self.fields[self.agent_x][self.agent_y]\r\n if field == Field.EMPTY:\r\n return self.rew_empty\r\n elif field == 
Field.POS_TERMINAL:\r\n return self.rew_pos\r\n elif field == Field.NEG_TERMINAL:\r\n return self.rew_neg\r\n\r\n raise ValueError # Agent is standing on an illegal tile!\r", "def getTotalReward(self):\n return self.lastFitness", "def get_reward(self):\n return self.calc_reward(self.sim.pose[:3], self.sim.v)", "def _compute_reward(self):\n last_score = self.episode_qualities[-2]\n new_score = self.episode_qualities[-1]\n reward = new_score - last_score\n return reward", "def ml_result(self, var, e):\n\t\tdist = self.enumerate_ask(var, e)\n\t\treturn max(dist.items(), key=lambda x:x[1])[0]" ]
[ "0.575768", "0.5676095", "0.56729364", "0.56670934", "0.559796", "0.55813545", "0.5575672", "0.55736303", "0.55562896", "0.55536056", "0.5553515", "0.5538506", "0.5526368", "0.55253816", "0.5502831", "0.5489734", "0.5470172", "0.5469639", "0.5466303", "0.5466303", "0.54242605", "0.5423435", "0.5418171", "0.5418171", "0.5406587", "0.5402487", "0.5394824", "0.5374265", "0.5369289", "0.5366906" ]
0.70698816
0
Return True if the node is a "real" endpoint of an edge in the network, \ otherwise False. OSM data includes lots of nodes that exist only as \ points to help streets bend around curves. An end point is a node that \
def is_endpoint(G: nx.Graph, node: int, strict=True): neighbors = set(list(G.predecessors(node)) + list(G.successors(node))) n = len(neighbors) d = G.degree(node) if node in neighbors: # If the node appears in its list of neighbors, it self-loops. this is # always an endpoint. return True # If node has no incoming edges or no outgoing edges, it must be an # endpoint elif G.out_degree(node) == 0 or G.in_degree(node) == 0: return True elif not (n == 2 and (d == 2 or d == 4)): # Else, if it does NOT have 2 neighbors AND either 2 or 4 directed # edges, it is an endpoint. either it has 1 or 3+ neighbors, in which # case it is a dead-end or an intersection of multiple streets or has # 2 neighbors but 3 degree (indicating a change from oneway to twoway) # or more than 4 degree (indicating a parallel edge) and thus is an # endpoint return True elif not strict: # Non-strict mode osmids = [] # Add all the edge OSM IDs for incoming edges for u in G.predecessors(node): for key in G[u][node]: osmids.append(G.edges[u, node, key]['osmid']) # Add all the edge OSM IDs for outgoing edges for v in G.successors(node): for key in G[node][v]: osmids.append(G.edges[node, v, key]['osmid']) # If there is more than 1 OSM ID in the list of edge OSM IDs then it is # an endpoint, if not, it isn't return len(set(osmids)) > 1 else: # If none of the preceding rules returned true, then it is not an # endpoint return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def node_is_edge(self, node: MazeCell) -> bool:\n return node.x == 0 or node.x == self._ncols - 1 or node.y == 0 or node.y == self._nrows - 1", "def door_in_edge(self, edge: list) -> bool:\n doors = self.get_interior_doors()\n room1 = self.get_rooms()[edge[0]]\n room2 = self.get_rooms()[edge[1]]\n for i in range(len(doors)):\n if utils.door_room_relation(doors[i], room1) and utils.door_room_relation(doors[i], room2):\n return True\n return False", "def is_edge(self):\n if self._row == 0 or self._row == 9 or self._column == 0 or self._column == 9:\n # check that the edge is not actually a corner square\n if not self.is_corner():\n # If not a corner and in a border row return True\n return True\n\n return False", "def isEdge(self,x,y):\n\t\treturn y in self._dictOut[x]", "def isEdge(self, x, y):\n if y in self.parseX() or x in self.parseX():\n return y in self.dictOut[x]\n else :\n print(\"verteces not found\")", "def _point_faces_edge(self, edge, point):\n a = sqrt((edge[0][0] - edge[1][0]) ** 2 + (edge[0][1] - edge[1][1]) ** 2)\n b = sqrt((edge[0][0] - point[0]) ** 2 + (edge[0][1] - point[1]) ** 2)\n c = sqrt((edge[1][0] - point[0]) ** 2 + (edge[1][1] - point[1]) ** 2)\n ang1, ang2 = self._angle(b, a, c), self._angle(c, a, b)\n if ang1 > pi / 2 or ang2 > pi / 2:\n return False\n return True", "def isEdge(self, x, y):\n return y in self._dictOut[x]", "def has_edge(self, otherNode):\n\t\t\treturn otherNode in self.edges", "def is_end_node():\n return False", "def contains_edge(self, node, other_node):\n return \\\n {node.get_name(), other_node.get_name()} in \\\n list([\n {edge.get_first_incident_node().get_name(), edge.get_second_incident_node().get_name()}\n for edge in self.get_edges()\n ]) # return true if there exists an edge between the input nodes and false otherwise", "def is_edge_server() -> bool:\n return Config().args.port is not None", "def is_self_referential(self, edge):\n # Determine if edge is directed or not to choose the proper splitting character\n split_str = gt.determine_split_string(edge)\n\n # split the edge\n edge_split = edge.split(split_str)\n\n return edge_split[0] == edge_split[-1] and (edge_split[0] == self.start_kind or\n edge_split[0] == self.end_kind)", "def isEdge(self,x,y):\r\n return self.matr[x][y]", "def endpoints(image):\n return _neighbors_conv(image) == 1", "def isEdge(self,x,y):\n\t\treturn self._matr[x][y]", "def IsEulerGraph(self):\n\n for node in self.nodes:\n if ((len(node.neighbours) % 2) == 1) or (len(node.neighbours) == 0):\n return False\n return True", "def isEdge(self,x,y):\n\t\treturn y in self._dict[x]", "def is_boundary_edge(a, b, bdy_edges):\n for edge in bdy_edges:\n a0, b0 = edge\n if a == a0 and b == b0:\n return True\n return False", "def hasEdge(self, startNodeId, endNodeId):\r\n for edge in self.nodes[startNodeId].adjList:\r\n if edge.endNodeId == endNodeId:\r\n return edge\r\n return None", "def has_edges(self):\n\n return len(self._edges) > 0", "def is_endpoint(color):\n\n img = cv2.cvtColor(color, cv2.COLOR_RGB2BGR)\n blur = cv2.GaussianBlur(img,(5,5),0)\n\n lower_range = np.array([175, 175, 175], dtype=np.uint8)\n upper_range = np.array([255, 255, 255], dtype=np.uint8)\n\n mask = cv2.inRange(blur, lower_range, upper_range)\n res = cv2.bitwise_and(img,img, mask= mask)\n\n bilateral_filtered_image = cv2.bilateralFilter(res, 5, 175, 175)\n edge_detected_image = cv2.Canny(bilateral_filtered_image, 75, 200)\n\n _, contours, _= cv2.findContours(edge_detected_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n contour_list = []\n for 
contour in contours:\n \tapprox = cv2.approxPolyDP(contour,0.01*cv2.arcLength(contour,True),True)\n \tarea = cv2.contourArea(contour)\n \tif ((len(approx) > 8) & (area > 10000) & (area < 30000)):\n \t\tcontour_list.append(contour)\n\n if not len(contour_list)==0:\n \treturn True\n else:\n \treturn False", "def is_edge_site(self) -> bool:\n return self.config.edge", "def getEndpoint(nodeTuple, pointList, direction, isReversedEdge):\r\n if((direction == 'start' and not isReversedEdge)\r\n or (direction == 'end' and isReversedEdge)): \r\n endNode = nodeTuple[0]\r\n if(isReversedEdge):\r\n ix = -2\r\n iy = -1\r\n else:\r\n ix = 0\r\n iy = 1\r\n else: \r\n endNode = nodeTuple[1]\r\n if(isReversedEdge):\r\n ix = 0\r\n iy = 1\r\n else:\r\n ix = -2 \r\n iy = -1 \r\n \r\n # Is it connected to a named port!?!\r\n if(endNode.isConnectedByNamedPort(edgeObject)):\r\n handler = endNode.getConnectedByNamedPortHandler(nodeTuple[2]) \r\n return dc.coords(handler)[:2]\r\n \r\n # Not a named port...\r\n return list(endNode.getClosestConnector2Point( endNode, pointList[ix], \r\n pointList[iy]))", "def edge_ground(X):\n gradient_x = img_conv(X, kernel_sobel_x)\n gradient_y = img_conv(X, kernel_sobel_x.transpose())\n mag = (gradient_x ** 2.0 + gradient_y ** 2.0) ** 0.5\n is_edge = mag > 1.0\n return is_edge.astype('f')", "def edge_not_in_component(edge, component):\n x_start = edge[0]\n x_stop = edge[0] + edge[2]\n y_start = edge[1]\n y_stop = edge[1] + edge[3]\n if x_start >= component[1].start and x_stop <= component[1].stop and y_start >= component[0].start and y_stop <= \\\n component[0].stop:\n return False\n else:\n return True", "def will_hit_edge(self, direction):\n return ((self.position <= 0 and direction.is_left()) or \n (self.position >= self.scene.step_count - 1 and \n direction.is_right()))", "def is_finite(self) -> bool:\n normal = self.to_normal_form()\n di_graph = nx.DiGraph()\n for production in normal.productions:\n body = production.body\n if len(body) == 2:\n di_graph.add_edge(production.head, body[0])\n di_graph.add_edge(production.head, body[1])\n try:\n nx.find_cycle(di_graph, orientation=\"original\")\n except nx.exception.NetworkXNoCycle:\n return True\n return False", "def IsWire(self, *args):\n return _BRepAlgo.BRepAlgo_EdgeConnector_IsWire(self, *args)", "def if_edge_state(self, s):\n if (s[0] == 0) or (s[0] == self.ni - 1) or (s[1] == 0) or (s[1] == self.nj - 1):\n return True\n else:\n return False", "def _can_access_endpoint(self, endpoint):\n if endpoint.visa_required:\n return self._has_valid_visa()\n else:\n return True" ]
[ "0.6984057", "0.62434214", "0.62212586", "0.61637247", "0.6161208", "0.6138825", "0.61363894", "0.6098323", "0.60917765", "0.6071788", "0.60438186", "0.6020851", "0.5994252", "0.5977758", "0.59772736", "0.59747833", "0.5952888", "0.5948707", "0.5904112", "0.58738995", "0.58695596", "0.5862068", "0.5843104", "0.5826069", "0.5801944", "0.5791275", "0.57862353", "0.57849574", "0.57780087", "0.5773884" ]
0.77966064
0
Recursively build a path of nodes until you hit an endpoint node. Please note this method is taken directly from OSMnx, and can be found in \
def build_path( G: nx.Graph, node: int, endpoints: List[int], path: List[int]) -> List[int]: # For each successor in the passed-in node for successor in G.successors(node): if successor not in path: # If successor is already in path, ignore it, otherwise add to path path.append(successor) if successor not in endpoints: # If successor not endpoint, recursively call # build_path until endpoint found path = build_path(G, successor, endpoints, path) else: # If successor is endpoint, path is completed, so return return path if (path[-1] not in endpoints) and (path[0] in G.successors(path[-1])): # If end of the path is not actually an endpoint and the path's # first node is a successor of the path's final node, then this is # actually a self loop, so add path's first node to end of path to # close it path.append(path[0]) return path
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_path(self,first_node,last_node):\n edge_pattern=re.compile('edge_(?P<begin_node>\\w+)_(?P<end_node>\\w+)_(?P<iterator>\\w+)')\n exit_paths=self.get_exiting_edges(first_node)\n next_nodes=self.get_exiting_nodes(first_node)\n #be careful here using the wrong assignment statement breaks this function\n possible_paths=[]\n for exit_path in exit_paths:\n possible_paths.append([exit_path])\n #print(\"{0} is {1}\".format('possible_paths',possible_paths))\n for i in range(len(self.node_names)):\n for index,path in enumerate(possible_paths):\n last_edge=path[-1]\n match=re.match(edge_pattern,last_edge)\n begin_node=match.groupdict()['begin_node']\n end_node=match.groupdict()['end_node']\n #print next_node\n if end_node==last_node:\n #print(\"The path found is {0}\".format(path))\n return path\n next_possible_paths=[]\n next_edges=self.get_exiting_edges(end_node)\n next_nodes=self.get_exiting_nodes(end_node)\n #print(\"{0} is {1}\".format('next_edges',next_edges))\n for index,next_edge in enumerate(next_edges):\n #be careful here using the wrong assignment statement breaks this function\n #next_path=path is a deal breaker!!\n next_path=[]\n for edge in path:\n next_path.append(edge)\n #print(\"{0} is {1}\".format('next_path',next_path))\n #print(\"{0} is {1}\".format('next_edge',next_edge))\n #next_node=next_nodes[index]\n #print next_node\n next_match=re.match(edge_pattern,next_edge)\n next_node=next_match.groupdict()[\"end_node\"]\n begin_node_next_edge=next_match.groupdict()[\"begin_node\"]\n #print(\"{0} is {1}\".format('next_node',next_node))\n #print(\"{0} is {1}\".format('begin_node_next_edge',begin_node_next_edge))\n\n if next_node==last_node and begin_node_next_edge==end_node:\n next_path.append(next_edge)\n #print(\"The path found is {0}\".format(next_path))\n return next_path\n elif begin_node_next_edge==end_node:\n next_path.append(next_edge)\n next_possible_paths.append(next_path)\n #print(\"{0} is {1}\".format('next_possible_paths',next_possible_paths))\n else:\n pass\n #print(\"{0} is {1}\".format('next_possible_paths',next_possible_paths))\n possible_paths=next_possible_paths\n #print(\"{0} is {1}\".format('possible_paths',possible_paths))", "def find_paths(self, start_key, target_key):\n\n stack = [(start_key, [start_key])]\n while stack:\n node_key, path = stack.pop()\n node = self.nodes[node_key]\n for nxt in node.neighbors - set(path):\n if nxt == target_key:\n yield path + [nxt]\n else:\n stack.append((nxt, path + [nxt]))", "def find_path(self, start):\n path = []\n leaf = start\n seen_nodes = []\n while True:\n if self.nodes[leaf]['address'] == '':\n return path\n\n left = leaf if self.nodes[leaf][\n 'left'] else self.nodes[leaf]['sibling']\n right = leaf if not self.nodes[leaf][\n 'left'] else self.nodes[leaf]['sibling']\n next_hash = do_hash(left + right, self.algo)\n leaf = self.nodes[leaf]['parent']\n assert leaf == next_hash\n assert next_hash not in seen_nodes\n assert next_hash in self.nodes\n step = [left, right, next_hash]\n path.append(step)", "def search_path(self):\n\n nodes = [self.start]\n final_node = None\n \n count = 0\n while True:\n count += 1\n\n if count % self.pick_target == 0:\n pick = self.goal.pos[:2]\n else:\n pick = self.car.random_pos()[:2]\n \n nearest = self.get_nearest_node(nodes, pick)\n\n if count % self.check_dubins == 0:\n solutions = self.dubins.find_tangents(nearest.pos, self.goal.pos)\n dubins_route, cost, valid = self.dubins.best_tangent(solutions)\n \n if valid:\n final_node = nearest\n break\n\n phi = 
self.get_steering_angle(nearest.pos, pick)\n pos = nearest.pos\n branch = [pos[:2]]\n \n for i in range(self.max_steps):\n pos = self.car.step(pos, phi)\n branch.append(pos[:2])\n \n # check safety of route-----------------------\n if phi == 0:\n safe = self.dubins.is_straight_route_safe(nearest.pos, pos)\n else:\n d, c, r = self.car.get_params(nearest.pos, phi)\n safe = self.dubins.is_turning_route_safe(nearest.pos, pos, d, c, r)\n # --------------------------------------------\n \n if not safe:\n continue\n \n new_node = Node(pos, phi, i+1)\n \n if new_node in nodes:\n continue\n \n new_node.branch = branch\n new_node.parent = nearest\n nodes.append(new_node)\n \n route = self.backtracking(final_node) + dubins_route\n path = self.car.get_path(self.car.start_pos, route)\n print('Total iteration:', count)\n \n return path, nodes", "def floyd_warshall_path(self, start, end, next_node): # pragma no cover\n if next_node[start][end] is None:\n return []\n path = [start]\n while start is not end:\n start = next_node[start][end]\n path.append(start)\n return path", "def __edgeRouter(self):\r\n def getEndpoint(nodeTuple, pointList, direction, isReversedEdge):\r\n \"\"\" Gets the nearest arrow endpoint. Handles edge reversal \"\"\"\r\n if((direction == 'start' and not isReversedEdge)\r\n or (direction == 'end' and isReversedEdge)): \r\n endNode = nodeTuple[0]\r\n if(isReversedEdge):\r\n ix = -2\r\n iy = -1\r\n else:\r\n ix = 0\r\n iy = 1\r\n else: \r\n endNode = nodeTuple[1]\r\n if(isReversedEdge):\r\n ix = 0\r\n iy = 1\r\n else:\r\n ix = -2 \r\n iy = -1 \r\n \r\n # Is it connected to a named port!?!\r\n if(endNode.isConnectedByNamedPort(edgeObject)):\r\n handler = endNode.getConnectedByNamedPortHandler(nodeTuple[2]) \r\n return dc.coords(handler)[:2]\r\n \r\n # Not a named port...\r\n return list(endNode.getClosestConnector2Point( endNode, pointList[ix], \r\n pointList[iy])) \r\n \r\n \r\n \r\n #todo: improve method for spline arrows + add comments + optimize?\r\n print '----------------Dummy Edge Routing-----------------'\r\n for dummyEdge in NodeWrapper.ID2LayerEdgeDict.keys():\r\n \r\n dummyList = NodeWrapper.ID2LayerEdgeDict[dummyEdge]\r\n dummyNode = dummyList[0]\r\n dummyChild = dummyNode.children.keys()[0]\r\n linkFlagList = dummyNode.children[dummyChild]\r\n \r\n # Real nodes at start/end of the edge\r\n edgeSourceNode = dummyNode.parents.keys()[0]\r\n edgeSourceNode = edgeSourceNode.getASGNode().graphObject_\r\n dummyNode = dummyList[-1]\r\n edgeTargetNode = dummyNode.children.keys()[0]\r\n #print 'Dummy edge number', dummyEdge,\r\n #print dummyList[0].parents.keys()[0].getName(), edgeTargetNode.getName()\r\n edgeTargetNode = edgeTargetNode.getASGNode().graphObject_\r\n nodeTuple = [edgeSourceNode, edgeTargetNode, None]\r\n \r\n # Some edges are internally reversed to break cycles, when drawing\r\n # this must be taken into account\r\n isReversedEdge = False\r\n edgesToRoute = []\r\n for linkNode, isReversed in linkFlagList:\r\n edgesToRoute.append(linkNode)\r\n if(isReversed):\r\n isReversedEdge = True\r\n \r\n # Get all the points the edge must pass through (sorted by layer order)\r\n dummyList.sort(lambda a, b: cmp(a.getLayer(), b.getLayer()))\r\n if(isReversedEdge):\r\n dummyList.reverse()\r\n sortedDummyRouteList = []\r\n for node in dummyList:\r\n sortedDummyRouteList += node.getEdgePosition()\r\n \r\n # Set the coordinates of the edge directly \r\n # This is complicated by the fact that AToM3 treats edges as two\r\n # segments that join poorly (for spline arrows)\r\n for 
edgeObject in edgesToRoute: \r\n dc = edgeObject.graphObject_.dc\r\n linkObj = edgeObject.graphObject_ \r\n tag = linkObj.tag\r\n \r\n if(isReversedEdge):\r\n inPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n else:\r\n inPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n \r\n #print 'Dummy route', sortedDummyRouteList\r\n numPoints = len(sortedDummyRouteList) / 2\r\n # Add 2 extra control points for odd case (to make splines nice)\r\n if(numPoints % 2 == 1):\r\n if(numPoints == 1):\r\n center = sortedDummyRouteList\r\n else:\r\n start = sortedDummyRouteList[:numPoints - 1]\r\n end = sortedDummyRouteList[numPoints + 1:]\r\n center = sortedDummyRouteList[numPoints - 1:numPoints + 1]\r\n \r\n if(not isReversedEdge):\r\n newMid1 = [center[0], center[1] - 20]\r\n newMid2 = [center[0], center[1] + 20]\r\n else:\r\n newMid2 = [center[0], center[1] - 20]\r\n newMid1 = [center[0], center[1] + 20]\r\n \r\n \r\n if(numPoints == 1):\r\n sortedDummyRouteList = newMid1 + center + newMid2 \r\n else:\r\n sortedDummyRouteList = start + newMid1 + center + newMid2 + end\r\n centerIndex = numPoints - 1 + 2\r\n \r\n # Add 1 extra control point for even case (to make splines nice)\r\n else:\r\n start = sortedDummyRouteList[:numPoints]\r\n end = sortedDummyRouteList[numPoints:]\r\n center = [start[-2] + (end[0] - start[-2]) / 2, \r\n start[-1] + (end[1] - start[-1]) / 2]\r\n sortedDummyRouteList = start + center + end \r\n centerIndex = numPoints\r\n \r\n # Now I know where the center is... so lets move the center object\r\n # Is the edge object a hyperlink?\r\n if(len(edgeObject.in_connections_ + edgeObject.out_connections_) > 2):\r\n fromObjs = []\r\n for semObj in edgeObject.in_connections_:\r\n fromObjs.append(semObj.graphObject_)\r\n toObjs = []\r\n for semObj in edgeObject.out_connections_:\r\n toObjs.append(semObj.graphObject_)\r\n optimizerHyperLink(dc, linkObj, fromObjs, toObjs, 0, 0, 0, center )\r\n continue\r\n \r\n else:\r\n linkObj.moveTo(* center)\r\n \r\n # Go through the 2 segments in the link\r\n nodeTuple[2] = edgeObject\r\n for connTuple in linkObj.connections:\r\n itemHandler = connTuple[0]\r\n direction = connTuple[1]\r\n \r\n if( direction ): \r\n inPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'start', isReversedEdge)\r\n\r\n segCoords = inPoint + sortedDummyRouteList[:centerIndex+2]\r\n else: \r\n outPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'end', isReversedEdge) \r\n segCoords = sortedDummyRouteList[centerIndex:] + outPoint\r\n segCoords = self.__reverseCoordList(segCoords)\r\n \r\n # Applies the changed coords to the canvas\r\n dc.coords( * [itemHandler] + segCoords ) \r\n \r\n # This may change the associated link drawings: \r\n # move them to the new point \r\n if( direction ):\r\n linkObj.updateDrawingsTo(inPoint[0], inPoint[1], itemHandler, \r\n segmentNumber=1)\r\n else:\r\n linkObj.updateDrawingsTo(outPoint[0], outPoint[1], itemHandler, \r\n segmentNumber=2)", "def path(self):\r\n node, p = self, []\r\n while node:\r\n p.append(node)\r\n node = node.parent\r\n yield from reversed(p)", "def _build_path(self):\n for point_3d in self.path_coordinates:\n self.connect_point_with_neighbors(point_3d)", "def findPathsToBase(A,bSize):\n M,N = A.shape\n pressedPaths = []\n\n #For every two nodes in the base find all paths between them\n for b1 in range(bSize):\n for b2 in range(bSize):\n #Remove all other base nodes from the graph so that\n #we only find paths that go 
through the specialization set\n if b1 == b2:\n #In this case we are looking for a cycle.\n mask = [b1]+list(range(bSize,N))\n newSize = len(mask) + 1\n reduA = np.zeros((newSize,newSize))\n #Because the networkx cycle finders don't do what we need\n #them to do, we create a new graph and find paths instead\n reduA[:-1,:-1] = A[mask,:][:,mask]\n #Remove ingoing edges from the base node and add to new node\n reduA[-1,:] = reduA[0,:]\n reduA[0,:] = np.zeros(newSize)\n G = nx.DiGraph(reduA.T)\n #Find paths from the base node to the new node\n #same as finding all the cycles\n paths = list(nx.all_simple_paths(G,0,newSize-1))\n\n else:\n mask = [b1,b2]+list(range(bSize,N))\n reduA = A[mask,:][:,mask]\n #Remove base node interactions\n reduA[:2,:2] = np.zeros((2,2))\n G = nx.DiGraph(reduA.T)\n paths = list(nx.all_simple_paths(G,0,1))\n\n #Process Paths so that they make sense when the rest of the base\n #set is added to the graph\n for p in paths:\n if p != []:\n if b1 == b2:\n p = np.array(p) + bSize-1\n else:\n p = np.array(p) + bSize-2\n p[[0,-1]] = [b1, b2]\n pressedPaths.append(p)\n\n return pressedPaths", "def dfs_paths(graph, start, goal, method='dfs'):\n \n # Define the search method\n stack_pop = -1\n if method == 'bfs':\n stack_pop = 0\n \n stack = [(start, [start])]\n while stack:\n (vertex, path) = stack.pop(stack_pop)\n neighbors = node_neighbors(graph, vertex)\n for next_node in set(neighbors) - set(path):\n if next_node == goal:\n yield path + [next_node]\n else:\n stack.append((next_node, path + [next_node]))", "def define_path(self, node): \n if node.childrens!=[]:\n for child in node.childrens:\n node_child = child['node']\n node_child.times_used+=1\n self.define_path(node_child)\n \n \n #take care of not used nodes, set their gradient to 0\n for node in self.input_node:\n if node.times_used==0:\n node.gradient=np.zeros((node.output_dim, self.output_node.output_dim))", "def walk(self, priv_path:list):\n # End conditions for recursive loop\n current_node = priv_path[-1]\n if current_node.location in self.destination and len(priv_path)>1:\n self.addItinerary(priv_path)\n self.n_routes+=1\n return\n if self.n_routes >= self.max_n_routes:\n return\n\n if len(priv_path)>1:\n # Get metadata of last edge type\n last_edge = self.EdgeType(priv_path[-2], priv_path[-1])\n else: # If it's start of itinerary, next edge would be travel edge\n # So, make last edge as stay\n last_edge = 'stay'\n if last_edge == 'stay': # next edge will be travel i.e., ship not None\n next_nodes = [node for node in self.G.neighbors(current_node) \n if self.G.edges[current_node, node]['ship'] is not None]\n else: # Next edge will be stay, i.e., ship = None\n next_nodes = [node for node in self.G.neighbors(current_node)\n if self.G.edges[current_node, node]['ship'] is None]\n \n for node in next_nodes:\n self.walk(priv_path+[node])", "def find_all_paths(graph, start, end, path=[]):\n path = path + [start]\n if start == end:\n return [path]\n paths = []\n for node in graph[start]:\n newpaths = find_all_paths(graph, node, end, path)\n paths += newpaths\n return paths", "def Find_Path(self):\n closed_nodes_map = [] # map of closed (tried-out) nodes\n open_nodes_map = [] # map of open (not-yet-tried) nodes\n dir_map = [] # map of directions\n row = [0] * self.n\n for i in range(self.m): # create 2d arrays\n closed_nodes_map.append(list(row))\n open_nodes_map.append(list(row))\n dir_map.append(list(row))\n \n pq = [[], []] # priority queues of open (not-yet-tried) nodes\n pqi = 0 # priority queue index\n # create the 
start node and push into list of open nodes\n n0 = node(self.xStart, self.yStart, 0.0, 0.0)\n n0.updatePriority(self.xFinish, self.yFinish)\n heappush(pq[pqi], n0)\n open_nodes_map[self.yStart][self.xStart] = n0.priority # mark it on the open nodes map\n \n # A* search\n while len(pq[pqi]) > 0:\n # get the current node w/ the highest priority\n # from the list of open nodes\n n1 = pq[pqi][0] # top node\n n0 = node(n1.xPos, n1.yPos, n1.distance, n1.priority)\n x = n0.xPos\n y = n0.yPos\n heappop(pq[pqi]) # remove the node from the open list\n open_nodes_map[y][x] = 0\n # mark it on the closed nodes map\n closed_nodes_map[y][x] = 1\n \n # quit searching when the goal state is reached\n if x == self.xFinish and y == self.yFinish:\n # Generate the path from finish to start by following the \n # directions.\n return self.Reconstruct_Path(dir_map)\n \n # generate moves (child nodes) in all possible directions\n for i in range(self.num_directions):\n new_x = x + self.dx[i]\n new_y = y + self.dy[i]\n Flag=True\n if not (new_x < 0 or new_x > self.n-1 or new_y < 0 or new_y > self.m - 1\n or self.MAP[new_y][new_x] == 1 or closed_nodes_map[new_y][new_x] == 1):\n # Check to see if the extended path runs through any obstacles\n if (abs(self.dx[i])>1 or abs(self.dy[i])>1):\n # Need to check that the path does not pass an object\n JumpCells=2*max(abs(self.dx[i]),abs(self.dy[i]))-1\n for K in range(1,JumpCells):\n YPOS=int(round(K*1.0*self.dy[i]/JumpCells))\n XPOS=int(round(K*1.0*self.dx[i]/JumpCells))\n if (self.MAP[y+YPOS][x+XPOS]==1):\n Flag=False\n if Flag: \n # generate a child node\n m0 = node(new_x, new_y, n0.distance, n0.priority)\n m0.calc_cost(self.dx[i], self.dy[i])\n m0.updatePriority(self.xFinish, self.yFinish)\n # if it is not in the open list then add into that\n if open_nodes_map[new_y][new_x] == 0:\n open_nodes_map[new_y][new_x] = m0.priority\n heappush(pq[pqi], m0)\n # mark its parent node direction\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n elif open_nodes_map[new_y][new_x] > m0.priority:\n # update the priority info\n open_nodes_map[new_y][new_x] = m0.priority\n # update the parent direction info\n dir_map[new_y][new_x] = (self.num_directions-i-1) % self.num_directions\n # replace the node\n # by emptying one pq to the other one\n # except the node to be replaced will be ignored\n # and the new node will be pushed in instead\n while not (pq[pqi][0].xPos == new_x and pq[pqi][0].yPos == new_y):\n heappush(pq[1 - pqi], pq[pqi][0])\n heappop(pq[pqi])\n heappop(pq[pqi]) # remove the wanted node\n # empty the larger size pq to the smaller one\n if len(pq[pqi]) > len(pq[1 - pqi]):\n pqi = 1 - pqi\n while len(pq[pqi]) > 0:\n heappush(pq[1-pqi], pq[pqi][0])\n heappop(pq[pqi]) \n pqi = 1 - pqi\n heappush(pq[pqi], m0) # add the better node instead\n return '','' # no route found", "def find_all_path(self, start, end, path=[]):\n path = path+[start]\n if start == end:\n return path\n paths = []\n for node in self.graph[start]:\n if node not in path:\n newpaths = self.find_path(node, end, path)\n paths.append(newpaths)\n return paths", "def _find_routes(self, start_node, previous_nodes=None):\n if previous_nodes is None:\n previous_nodes = []\n\n routes = []\n for con in self.connections:\n if start_node == con.end:\n con.flip()\n if start_node == con.start:\n # if the connection ends in a box output,\n # add the connection (as a route of length 1)\n if con.end.is_box_output():\n routes.append([con])\n elif con.end.is_box_input():\n raise Exception(\"Route in 
connections detected, \"\n \"that ends at an input.\")\n elif con.end.is_switch_output():\n # check if there is conflict with previous nodes\n if con.end.switch in previous_nodes:\n raise Exception(\"Loop detected in connections at\"\n f\"switch {con.end.switch}.\")\n # check orientation\n if con.end.switch.orientation == 1:\n raise Exception(\"Conflicting switch orientation \"\n f\"for switch {con.end.switch}\")\n # Set orientation of the switch\n con.end.switch.orientation = -1\n # Add the node to the previous nodes and call the method\n # for the next node\n if con.start.parent_type == 'switch':\n previous_nodes.append(con.start.switch)\n else:\n previous_nodes.append(con.start)\n next_step = self._find_routes(\n con.end.switch.input,\n previous_nodes=previous_nodes\n )\n # Merge the current connection with the resulting routes\n for route in next_step:\n routes.append([con] + route)\n # proceed the analogously for a switch input\n elif con.end.is_switch_input():\n if con.end.switch in previous_nodes:\n raise Exception(\"Loop detected in connections at\"\n f\"switch {con.end.switch}.\")\n if con.end.switch.orientation == -1:\n raise Exception(\"Conflicting switch orientation \"\n f\"for switch {con.end.switch}\")\n con.end.switch.orientation = 1\n if con.start.parent_type == 'switch':\n previous_nodes.append(con.start.switch)\n else:\n previous_nodes.append(con.start)\n\n # continue with both outputs\n next_step0 = self._find_routes(\n con.end.switch.output[0],\n previous_nodes=previous_nodes\n )\n\n next_step1 = self._find_routes(\n con.end.switch.output[1],\n previous_nodes=previous_nodes\n )\n\n for route in next_step0:\n routes.append([con] + route)\n for route in next_step1:\n routes.append([con] + route)\n\n else:\n raise TypeError(f\"Node {con.end} not recognised\")\n\n return routes", "def FindAllPaths(graph, start, end, path=[]):\n path = path + [start]\n if start == end:\n return [path]\n if start not in graph:\n return None\n paths = []\n for node in graph[start]:\n if node not in path:\n newpaths = find_all_paths(graph, node, end, path)\n for newpath in newpaths:\n paths.append(newpath)\n return paths", "def bfs_paths(self, start: str, goal: str) -> List[Path]:\n queue = [(start, [start])]\n while queue:\n (node, path) = queue.pop(0)\n if node not in self.graph:\n yield []\n for _next in set(self.graph[node]) - set(path):\n if _next == goal:\n yield path + [_next]\n elif _next in self.graph:\n queue.append((_next, path + [_next]))", "def build_path(start, end):\n a = hierarchy.index(start)\n b = hierarchy.index(end)\n if a == b:\n return []\n elif a < b:\n return hierarchy[a + 1 : b + 1]\n return list(reversed(hierarchy[b:a]))", "def plan_path(self, start_point, end_point, map_obj):\n # STUFF FOR TESTING \n if self.enable_vis:\n marker = Marker()\n marker.header.frame_id = \"/map\"\n marker.type = marker.POINTS\n marker.action = marker.ADD\n \n marker.scale.x = 0.1\n marker.scale.y = 0.1\n self.vis_pub.publish(marker)\n \n exploration_bias = 1.0 - self.goal_bias\n final_node = None\n num_existing_path_points_added = 0\n \n self.rrt_star = RRTStar(Node(start_point))\n self.max_iterations = self.rrt_star.max_size\n while self.rrt_star.size <= self.max_iterations:\n p = np.random.uniform()\n if p < exploration_bias:\n \n x_rand = self.map.sample_free_space()\n else:\n if final_node is None:\n x_rand = end_point\n else:\n x_rand = self.branched_from_existing_path(\n final_node,\n depth_underestimate=num_existing_path_points_added\n )\n num_existing_path_points_added += 1\n\n 
x_nearest = self.rrt_star.nearest(x_rand) # Find the nearest node to x_rand\n\n path = self.map.generate_line_path(x_nearest.value, x_rand, eta=self.eta)\n if path is not None: # no obstacles between x_nearest and x_rand\n x_new = path[-1]\n X_nearby_connectable = self.find_nearby_connectable(x_nearest, x_new)\n\n cost_min, node_min = self.find_best_parent(X_nearby_connectable, x_new)\n\n X_nearby_connectable.remove(node_min) # Remove x_new's parent node from the list of nearby nodes so it is not considered for rewiring\n \n # Create the new node at x_new!\n node_new = self.rrt_star.add_config(node_min, x_new)\n \n if self.enable_vis:\n # FOR TESTING ONLY #\n # Code to publish marker for new node\n ###########################################################################################\n TEMP = Point()\n TEMP.x = x_new[0]\n TEMP.y = x_new[1]\n TEMP.z = .05\n marker.points.append(TEMP)\n \n TEMP = ColorRGBA()\n TEMP.r = 1\n TEMP.g = 0\n TEMP.b = 0\n TEMP.a = 1\n \n marker.colors.append(TEMP)\n \n self.vis_pub.publish(marker)\n ###########################################################################################\n\n self.rewire(cost_min, node_new, X_nearby_connectable)\n \n if np.allclose(node_new.value, end_point, .05, 0) and (final_node is None):#np.array_equal(node_new.value, end_point):\n final_node = node_new\n # reduce exploration bias so that we reinforce the existing path\n exploration_bias = .5\n if VERBOSE:\n print(\"Path found!!!!\")\n print(final_node.cost)\n if rospy.get_time() - self.start_time > self.time_thresh:\n if VERBOSE:\n print(self.rrt_star.size)\n break\n\n \n if final_node is not None:\n if self.enable_vis:\n marker = Marker()\n marker.header.frame_id = \"/map\"\n marker.type = marker.POINTS\n marker.action = marker.ADD\n \n marker.scale.x = 0.1\n marker.scale.y = 0.1\n marker.points = []\n marker.colors = []\n def recur(node):\n if self.enable_vis:\n TEMP = Point()\n TEMP.x = node.value[0]\n TEMP.y = node.value[1]\n TEMP.z = .05\n marker.points.append(TEMP)\n \n TEMP = ColorRGBA()\n TEMP.r = 1\n TEMP.g = 0\n TEMP.b = 0\n TEMP.a = 1\n \n marker.colors.append(TEMP)\n \n \n self.trajectory.points.append([node.value[0], node.value[1]])\n parent = node.parent\n if parent is not None:\n recur(parent)\n recur(final_node)\n self.trajectory.points.reverse()\n if self.enable_vis:\n self.vis_pub.publish(marker)\n if VERBOSE:\n print (final_node.depth)\n else:\n if VERBOSE:\n print(\"No path found! 
Please try again.\")\n \n \n \n # publish trajectory\n self.traj_pub.publish(self.trajectory.toPoseArray())\n\n # visualize trajectory Markers\n self.trajectory.publish_viz()", "def _build_path(self):\r\n\r\n path = []\r\n \r\n for i in range(len(self.path) - 1):\r\n current_node = self.path[i]\r\n next_node = self.path[i + 1]\r\n \r\n key_list = [i for i in range(len(current_node.leaving_roads)) if current_node.leaving_roads[i].end == next_node]\r\n \r\n if len(key_list) == 0:\r\n raise Exception('ERROR (in gps._build_path()) : there is no route.')\r\n \r\n path.append(key_list[0])\r\n \r\n return path", "def build_node_chains(self):\n\n self.node_chain_lookup = -np.ones(self.tri.npoints, dtype=np.int)\n self.node_chain_list = []\n\n node_chain_idx = 1\n\n self.node_chain_list.append([]) # placeholder for any isolated base-level nodes\n\n for node1 in self.node_high_to_low: \n if (self.node_chain_lookup[node1] != -1): \n continue\n\n junction, this_chain = self._node_walk_downhill(node1)\n\n if len(this_chain) > 1:\n self.node_chain_list.append(this_chain)\n \n self.node_chain_lookup[this_chain[0:-1]] = node_chain_idx \n if self.node_chain_lookup[this_chain[-1]] == -1:\n self.node_chain_lookup[this_chain[-1]] = node_chain_idx\n\n node_chain_idx += 1\n\n else: \n self.node_chain_list[0].append(this_chain[0])\n self.node_chain_lookup[this_chain[0]] = 0\n\n return", "def dfs_paths_dict_recur(\n graph: Mapping[Node, set[Node]],\n start: Node,\n goal: Node,\n path: Optional[list[Node]] = None\n) -> Iterable[list[Node]]:\n if path is None:\n path = [start]\n if start == goal:\n yield path\n else:\n for next_node in graph[start].difference(path):\n next_path = path + [next_node]\n yield from dfs_paths_dict_recur(graph, next_node, goal, next_path)", "def findPath(g, start, end, path=[]):\n path = path + [start]\n if start == end:\n return path\n if not start in g:\n return None\n for node in g[start]:\n if node not in path:\n newpath = findPath(g, node, end, path)\n if newpath: return newpath\n return None", "def create_path(self):\n\n partials = []\n partials.append({})\n #print self.trip_id\n\n #this variable is true if we have not yet recorded the first edge of a path\n first_edge = True\n #this variable is false until we hit the midpoint\n hit_midpoint = False\n\n first_lasts = []\n first_lasts.append([0,0])\n matrices = []\n matrices.append([np.zeros((self.graph.rows,self.graph.cols)),0])\n edge_sets = []\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n cur_line = self.line_num\n good_graphs = []\n good_graphs.append(True)\n nodes_visited = []\n nodes_visited.append([])\n #normalized = dg.normalize(self.graph.lines[cur_line])\n normalized = normalize_simple(self.graph.lines[cur_line])\n matrices_index = 0\n prev_coords = (-1,-1)\n while normalized[0] == self.trip_id:\n lat = normalized[1]\n lon = normalized[2]\n coords = self.graph.gps_to_coords(lat,lon)\n node = self.graph.coords_to_node(coords[0],coords[1])\n\n if prev_coords == (-1,-1) and coords[0] != -1:\n first_lasts[matrices_index][0] = node\n\n if coords[0] == -1 and prev_coords[0] != -1:\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n\n if prev_coords != (-1,-1) and coords[0] != -1 and coords != prev_coords:\n edge_num = self.graph.edge_num(prev_coords[0],prev_coords[1],coords[0],coords[1])\n if edge_num == -1:\n good_graphs[matrices_index] = False\n else:\n edge_sets[matrices_index][edge_num] = 1\n if edge_num in partials[matrices_index] and 
partials[matrices_index][edge_num] == 0:\n del partials[matrices_index][edge_num]\n if not hit_midpoint:\n if first_edge:\n above = (prev_coords[0]-1,prev_coords[1])\n below = (prev_coords[0]+1,prev_coords[1])\n left = (prev_coords[0],prev_coords[1]-1)\n right = (prev_coords[0],prev_coords[1]+1)\n for next_coords in (above,below,left,right):\n other_edge = self.graph.edge_num(prev_coords[0],prev_coords[1],next_coords[0],next_coords[1])\n if other_edge != -1:\n partials[matrices_index][other_edge] = 0\n first_edge = False\n if self.graph.coords_to_node(prev_coords[0],prev_coords[1]) == self.midpoint:\n hit_midpoint = True\n partials[matrices_index][edge_num] = 1\n if self.graph.coords_to_node(coords[0],coords[1]) == self.midpoint:\n hit_midpoint = True\n\n\n\n if coords[0] == -1:\n matrices.append([np.zeros((self.graph.rows,self.graph.cols)),0])\n first_lasts.append([0,0])\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n good_graphs.append(True)\n nodes_visited.append([])\n matrices_index += 1\n partials.append({})\n hit_midpoint = False\n first_edge = True\n \n elif coords[0] < self.graph.rows and coords[1] < self.graph.cols and not matrices[matrices_index][0][coords[0]][coords[1]]:\n matrices[matrices_index][1] += 1\n matrices[matrices_index][0][coords[0]][coords[1]] = 1\n nodes_visited[matrices_index].append(coords)\n\n prev_coords = coords\n\n cur_line += 1\n if cur_line == len(self.graph.lines):\n break\n #normalized = dg.normalize(self.graph.lines[cur_line])\n normalized = normalize_simple(self.graph.lines[cur_line])\n\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n self.next_line = cur_line\n best_index = 0\n best_score = 0\n for matrix_index in range(len(matrices)):\n if matrices[matrix_index][1] > best_score:\n best_score = matrices[matrix_index][1]\n best_index = matrix_index\n\n for coords in nodes_visited[best_index]:\n self.graph.node_visit(self.trip_id,coords)\n \n\n if self.trip_id not in self.graph.trip_id2line_num:\n #if first_lasts[best_index] == [28,5]:\n # print \"a to b: %d\" % self.trip_id\n self.graph.first_last2trip_ids[tuple(first_lasts[best_index])].append(self.trip_id)\n\n return matrices[best_index][0],edge_sets[best_index],good_graphs[best_index],partials[best_index]", "def generate_path(goal_node, visited):\n goal_state = goal_node['state']\n path = [goal_state]\n while goal_node['parent']:\n path.append(goal_node['state'])\n goal_node = visited[goal_node['parent']]\n return path", "def build_path(start, hi, lo, bgArray):\n dead_ends = 0\n offsets = [\n (0, 1),\n (1, 1),\n (1, 0),\n (1, -1),\n (0, -1),\n (-1, -1),\n (-1, 0),\n (-1, 1)\n ]\n visited = [start,]\n path = [start,]\n location = start\n while path != []:\n found = False\n for offset in offsets:\n neighbor = (location[0] + offset[0], location[1] + offset[1])\n if len(visited) > 1 and neighbor == start:\n # lArray[neighbor] = label\n # print(\"Dead ends: \", dead_ends)\n return (path, visited, dead_ends)\n if is_edge(neighbor, hi, lo, bgArray) and neighbor not in visited:\n # lArray[neighbor] = label\n visited.append(neighbor)\n path.append(neighbor)\n location = neighbor\n found = True\n break\n if not found:\n # Dead end found, re-trace steps\n # print(\"@@@DEAD END!\")\n dead_ends += 1\n path.pop()\n if len(path) > 0:\n location = path[len(path)-1]\n print(\"@@@Edge is not part of the path? 
What the?\")\n return ([],[], -1)", "def construct_path(node):\n path = []\n current = node\n while current is not None:\n path.append(current.position)\n current = current.parent\n return path", "def find_path_recursion(cls, start_point: Coordination, current_point: Coordination,\n path_list: list[Coordination] = None):\n if path_list is None:\n path_list = [current_point]\n\n # the robot will keep going parallel to x-axis until it hits the x point of the start point\n if current_point.x != start_point.x:\n next_y = current_point.y\n\n if current_point.x > start_point.x:\n next_x = current_point.x - 1\n else:\n next_x = current_point.x + 1\n\n next_coordination = Coordination(next_x, next_y)\n path_list.append(next_coordination)\n cls.find_path_recursion(start_point=start_point, current_point=next_coordination, path_list=path_list)\n\n # the robot will keep going parallel to y-axis until it hits the y point of the start point\n elif current_point.y != start_point.y:\n next_x = current_point.x\n\n if current_point.y > start_point.y:\n next_y = current_point.y - 1\n else:\n next_y = current_point.y + 1\n\n next_coordination = Coordination(next_x, next_y)\n path_list.append(next_coordination)\n cls.find_path_recursion(start_point=start_point, current_point=next_coordination, path_list=path_list)\n\n return path_list", "def path(self):\n node, return_path = self, []\n while node:\n # Add the nodes in reverse order to a list until you reach the\n # root parent node which will terminate the loop\n return_path.append(node)\n node = node.parent\n # Reverse the list to get the proper path back\n return list(reversed(return_path))" ]
[ "0.66372293", "0.5984706", "0.5966498", "0.5830176", "0.58110154", "0.58090067", "0.5781353", "0.577707", "0.5756183", "0.5688646", "0.56742686", "0.5663207", "0.5620114", "0.56037915", "0.5599367", "0.55866355", "0.5558341", "0.5553427", "0.5551647", "0.55495954", "0.55245346", "0.5511733", "0.54762167", "0.5463652", "0.54474235", "0.5445509", "0.5444622", "0.5434825", "0.53709906", "0.5356731" ]
0.7077985
0
Create a list of all the paths to be simplified between endpoint nodes. \ The path is ordered from the first endpoint, through the interstitial \ nodes, to the second endpoint. Please note this method is taken directly from OSMnx, and can be found in \
def get_paths_to_simplify(G: nx.Graph, strict: bool=True) -> List[List[int]]: # First identify all the nodes that are endpoints endpoints = set([node for node in G.nodes() if is_endpoint(G, node, strict=strict)]) # Initialize the list to be returned; an empty list paths_to_simplify = [] # For each endpoint node, look at each of its successor nodes for node in endpoints: for successor in G.successors(node): if successor not in endpoints: # if the successor is not an endpoint, build a path from the # endpoint node to the next endpoint node try: paths_to_simplify.append( build_path(G, successor, endpoints, path=[node, successor])) except RuntimeError: # Note: Recursion errors occur if some connected component # is a self-contained ring in which all nodes are not # end points handle it by just ignoring that # component and letting its topology remain intact # (this should be a rare occurrence). log(('Recursion error: exceeded max depth, moving on to ' 'next endpoint successor'), level=lg.WARNING) return paths_to_simplify
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def convert_paths(self):\n # convert to node sequences, dropping s'\n self.nodeseq_paths = []\n for path in self.paths:\n node_seq = [] # don't include s'\n for arc in path:\n node_seq.append(self.arc_info[arc]['destin'])\n self.nodeseq_paths.append(node_seq)\n # convert to og graph\n self.converted_paths = []\n for path in self.nodeseq_paths:\n this_path = []\n add_next_node = True\n for i in range(len(path) - 1):\n print(\"This path is\", this_path)\n node1 = path[i]\n node2 = path[i + 1]\n print(\"node1={}, node2={}\".format(node1, node2))\n if (node1, node2) in self.mapping:\n sc = self.mapping[(node1, node2)]\n print(\"uses sc edge for {}\".format(sc))\n print(\"should add {}, but also need to check for overlaps\".\n format(sc[1:-1]))\n if sc[1] in this_path:\n # we have an overlap\n start = len(this_path) - this_path.index(sc[1])\n this_path.extend(sc[start:-1])\n else:\n this_path.extend(sc[1:-1])\n add_next_node = False # next node is second of sc edge\n elif add_next_node:\n this_path.append(node1)\n else:\n add_next_node = True\n this_path.append(path[-1])\n self.converted_paths.append(this_path)", "def build_path(\n G: nx.Graph,\n node: int,\n endpoints: List[int],\n path: List[int]) -> List[int]:\n\n # For each successor in the passed-in node\n for successor in G.successors(node):\n if successor not in path:\n # If successor is already in path, ignore it, otherwise add to path\n path.append(successor)\n\n if successor not in endpoints:\n # If successor not endpoint, recursively call\n # build_path until endpoint found\n path = build_path(G, successor, endpoints, path)\n\n else:\n # If successor is endpoint, path is completed, so return\n return path\n\n if (path[-1] not in endpoints) and (path[0] in G.successors(path[-1])):\n # If end of the path is not actually an endpoint and the path's\n # first node is a successor of the path's final node, then this is\n # actually a self loop, so add path's first node to end of path to\n # close it\n path.append(path[0])\n\n return path", "def _find_all_paths(self, start_vertex: str, end_vertex: str, path=[]):\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n paths = []\n for vertex in self.graph[start_vertex]:\n if vertex not in path:\n extended_paths = self._find_all_paths(vertex,\n end_vertex,\n path)\n for p in extended_paths:\n paths.append(p)\n return paths", "def shortestpaths(self, start, end, edgeweight=\"t_0\"):\n graph = self.graph\n shortest_nodepaths = list(\n nx.all_shortest_paths(\n graph, start, end, weight=edgeweight, method=\"dijkstra\"\n )\n )\n shortest_paths = []\n for path in shortest_nodepaths:\n edgepath = []\n for i in range(len(path) - 1):\n edgepath.append((path[i], path[i + 1]))\n shortest_paths.append(edgepath)\n\n return shortest_paths", "def get_path_list(path):\n # Build the path from end back to beginning.\n nodes = []\n prevevt = None\n while path is not None:\n nodes.append((path.node, path.cost, prevevt))\n prevevt = path.evt\n path = path.prev\n\n nodes.reverse()\n return nodes", "def _generate_subpaths(self):\n\n scale = self.SCALE\n\n for point in self._points:\n x_base = point[0] * scale + self.border * scale + self.line_size\n y_base = point[1] * scale + self.border * scale + self.line_size\n\n yield 'M {x0} {y0} L {x0} {y1} L {x1} {y1} L {x1} {y0} z'.format(\n x0=x_base,\n y0=y_base,\n x1=x_base + scale,\n y1=y_base + scale\n )", "def reconstruct_path(goal: Vector2D, prev_node: dict) -> list:\n path = []\n prev = prev_node[goal] # remove 'goal' from path\n \n 
while prev != None:\n path.append(prev)\n prev = prev_node[prev]\n \n path = path[:-1] # remove 'start' from path\n path.reverse()\n return path", "def _build_path(self):\r\n\r\n path = []\r\n \r\n for i in range(len(self.path) - 1):\r\n current_node = self.path[i]\r\n next_node = self.path[i + 1]\r\n \r\n key_list = [i for i in range(len(current_node.leaving_roads)) if current_node.leaving_roads[i].end == next_node]\r\n \r\n if len(key_list) == 0:\r\n raise Exception('ERROR (in gps._build_path()) : there is no route.')\r\n \r\n path.append(key_list[0])\r\n \r\n return path", "def extract_path(self):\n if self.extracted_path is not None:\n return self.extracted_path\n current = self\n path = []\n while current:\n path.append([current.end, current.path_cost])\n current = current.parent\n return list(reversed(path))", "def find_all_path(self, start_vertex, end_vertex, path=[]):\n\n graph = self.__graph_dict\n path = path + [start_vertex]\n if start_vertex == end_vertex:\n return [path]\n\n if start_vertex not in graph:\n return []\n\n paths = []\n for vertex in graph[start_vertex]:\n if vertex not in path:\n extended_paths = self.find_all_path(vertex, end_vertex,path)\n for p in extended_paths:\n paths.append(p)\n return paths", "def all_shortest_paths(self, start_node, end_node):\n s=self.min_dist(start_node,end_node)\n return self.all_paths(start_node,end_node,s,[])", "def __edgeRouter(self):\r\n def getEndpoint(nodeTuple, pointList, direction, isReversedEdge):\r\n \"\"\" Gets the nearest arrow endpoint. Handles edge reversal \"\"\"\r\n if((direction == 'start' and not isReversedEdge)\r\n or (direction == 'end' and isReversedEdge)): \r\n endNode = nodeTuple[0]\r\n if(isReversedEdge):\r\n ix = -2\r\n iy = -1\r\n else:\r\n ix = 0\r\n iy = 1\r\n else: \r\n endNode = nodeTuple[1]\r\n if(isReversedEdge):\r\n ix = 0\r\n iy = 1\r\n else:\r\n ix = -2 \r\n iy = -1 \r\n \r\n # Is it connected to a named port!?!\r\n if(endNode.isConnectedByNamedPort(edgeObject)):\r\n handler = endNode.getConnectedByNamedPortHandler(nodeTuple[2]) \r\n return dc.coords(handler)[:2]\r\n \r\n # Not a named port...\r\n return list(endNode.getClosestConnector2Point( endNode, pointList[ix], \r\n pointList[iy])) \r\n \r\n \r\n \r\n #todo: improve method for spline arrows + add comments + optimize?\r\n print '----------------Dummy Edge Routing-----------------'\r\n for dummyEdge in NodeWrapper.ID2LayerEdgeDict.keys():\r\n \r\n dummyList = NodeWrapper.ID2LayerEdgeDict[dummyEdge]\r\n dummyNode = dummyList[0]\r\n dummyChild = dummyNode.children.keys()[0]\r\n linkFlagList = dummyNode.children[dummyChild]\r\n \r\n # Real nodes at start/end of the edge\r\n edgeSourceNode = dummyNode.parents.keys()[0]\r\n edgeSourceNode = edgeSourceNode.getASGNode().graphObject_\r\n dummyNode = dummyList[-1]\r\n edgeTargetNode = dummyNode.children.keys()[0]\r\n #print 'Dummy edge number', dummyEdge,\r\n #print dummyList[0].parents.keys()[0].getName(), edgeTargetNode.getName()\r\n edgeTargetNode = edgeTargetNode.getASGNode().graphObject_\r\n nodeTuple = [edgeSourceNode, edgeTargetNode, None]\r\n \r\n # Some edges are internally reversed to break cycles, when drawing\r\n # this must be taken into account\r\n isReversedEdge = False\r\n edgesToRoute = []\r\n for linkNode, isReversed in linkFlagList:\r\n edgesToRoute.append(linkNode)\r\n if(isReversed):\r\n isReversedEdge = True\r\n \r\n # Get all the points the edge must pass through (sorted by layer order)\r\n dummyList.sort(lambda a, b: cmp(a.getLayer(), b.getLayer()))\r\n if(isReversedEdge):\r\n 
dummyList.reverse()\r\n sortedDummyRouteList = []\r\n for node in dummyList:\r\n sortedDummyRouteList += node.getEdgePosition()\r\n \r\n # Set the coordinates of the edge directly \r\n # This is complicated by the fact that AToM3 treats edges as two\r\n # segments that join poorly (for spline arrows)\r\n for edgeObject in edgesToRoute: \r\n dc = edgeObject.graphObject_.dc\r\n linkObj = edgeObject.graphObject_ \r\n tag = linkObj.tag\r\n \r\n if(isReversedEdge):\r\n inPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n else:\r\n inPoint = dc.coords( tag + \"1stSeg0\" )[:2]\r\n outPoint = dc.coords( tag + \"2ndSeg0\" )[:2]\r\n \r\n #print 'Dummy route', sortedDummyRouteList\r\n numPoints = len(sortedDummyRouteList) / 2\r\n # Add 2 extra control points for odd case (to make splines nice)\r\n if(numPoints % 2 == 1):\r\n if(numPoints == 1):\r\n center = sortedDummyRouteList\r\n else:\r\n start = sortedDummyRouteList[:numPoints - 1]\r\n end = sortedDummyRouteList[numPoints + 1:]\r\n center = sortedDummyRouteList[numPoints - 1:numPoints + 1]\r\n \r\n if(not isReversedEdge):\r\n newMid1 = [center[0], center[1] - 20]\r\n newMid2 = [center[0], center[1] + 20]\r\n else:\r\n newMid2 = [center[0], center[1] - 20]\r\n newMid1 = [center[0], center[1] + 20]\r\n \r\n \r\n if(numPoints == 1):\r\n sortedDummyRouteList = newMid1 + center + newMid2 \r\n else:\r\n sortedDummyRouteList = start + newMid1 + center + newMid2 + end\r\n centerIndex = numPoints - 1 + 2\r\n \r\n # Add 1 extra control point for even case (to make splines nice)\r\n else:\r\n start = sortedDummyRouteList[:numPoints]\r\n end = sortedDummyRouteList[numPoints:]\r\n center = [start[-2] + (end[0] - start[-2]) / 2, \r\n start[-1] + (end[1] - start[-1]) / 2]\r\n sortedDummyRouteList = start + center + end \r\n centerIndex = numPoints\r\n \r\n # Now I know where the center is... 
so lets move the center object\r\n # Is the edge object a hyperlink?\r\n if(len(edgeObject.in_connections_ + edgeObject.out_connections_) > 2):\r\n fromObjs = []\r\n for semObj in edgeObject.in_connections_:\r\n fromObjs.append(semObj.graphObject_)\r\n toObjs = []\r\n for semObj in edgeObject.out_connections_:\r\n toObjs.append(semObj.graphObject_)\r\n optimizerHyperLink(dc, linkObj, fromObjs, toObjs, 0, 0, 0, center )\r\n continue\r\n \r\n else:\r\n linkObj.moveTo(* center)\r\n \r\n # Go through the 2 segments in the link\r\n nodeTuple[2] = edgeObject\r\n for connTuple in linkObj.connections:\r\n itemHandler = connTuple[0]\r\n direction = connTuple[1]\r\n \r\n if( direction ): \r\n inPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'start', isReversedEdge)\r\n\r\n segCoords = inPoint + sortedDummyRouteList[:centerIndex+2]\r\n else: \r\n outPoint = getEndpoint(nodeTuple, sortedDummyRouteList,\r\n 'end', isReversedEdge) \r\n segCoords = sortedDummyRouteList[centerIndex:] + outPoint\r\n segCoords = self.__reverseCoordList(segCoords)\r\n \r\n # Applies the changed coords to the canvas\r\n dc.coords( * [itemHandler] + segCoords ) \r\n \r\n # This may change the associated link drawings: \r\n # move them to the new point \r\n if( direction ):\r\n linkObj.updateDrawingsTo(inPoint[0], inPoint[1], itemHandler, \r\n segmentNumber=1)\r\n else:\r\n linkObj.updateDrawingsTo(outPoint[0], outPoint[1], itemHandler, \r\n segmentNumber=2)", "def reconstruct_path(source, target, predecessors):\n if source == target:\n return []\n prev = predecessors[source]\n curr = prev[target]\n path = [target, curr]\n while curr != source:\n curr = prev[curr]\n path.append(curr)\n return list(reversed(path))", "def get_path(self,first_node,last_node):\n edge_pattern=re.compile('edge_(?P<begin_node>\\w+)_(?P<end_node>\\w+)_(?P<iterator>\\w+)')\n exit_paths=self.get_exiting_edges(first_node)\n next_nodes=self.get_exiting_nodes(first_node)\n #be careful here using the wrong assignment statement breaks this function\n possible_paths=[]\n for exit_path in exit_paths:\n possible_paths.append([exit_path])\n #print(\"{0} is {1}\".format('possible_paths',possible_paths))\n for i in range(len(self.node_names)):\n for index,path in enumerate(possible_paths):\n last_edge=path[-1]\n match=re.match(edge_pattern,last_edge)\n begin_node=match.groupdict()['begin_node']\n end_node=match.groupdict()['end_node']\n #print next_node\n if end_node==last_node:\n #print(\"The path found is {0}\".format(path))\n return path\n next_possible_paths=[]\n next_edges=self.get_exiting_edges(end_node)\n next_nodes=self.get_exiting_nodes(end_node)\n #print(\"{0} is {1}\".format('next_edges',next_edges))\n for index,next_edge in enumerate(next_edges):\n #be careful here using the wrong assignment statement breaks this function\n #next_path=path is a deal breaker!!\n next_path=[]\n for edge in path:\n next_path.append(edge)\n #print(\"{0} is {1}\".format('next_path',next_path))\n #print(\"{0} is {1}\".format('next_edge',next_edge))\n #next_node=next_nodes[index]\n #print next_node\n next_match=re.match(edge_pattern,next_edge)\n next_node=next_match.groupdict()[\"end_node\"]\n begin_node_next_edge=next_match.groupdict()[\"begin_node\"]\n #print(\"{0} is {1}\".format('next_node',next_node))\n #print(\"{0} is {1}\".format('begin_node_next_edge',begin_node_next_edge))\n\n if next_node==last_node and begin_node_next_edge==end_node:\n next_path.append(next_edge)\n #print(\"The path found is {0}\".format(next_path))\n return next_path\n elif 
begin_node_next_edge==end_node:\n next_path.append(next_edge)\n next_possible_paths.append(next_path)\n #print(\"{0} is {1}\".format('next_possible_paths',next_possible_paths))\n else:\n pass\n #print(\"{0} is {1}\".format('next_possible_paths',next_possible_paths))\n possible_paths=next_possible_paths\n #print(\"{0} is {1}\".format('possible_paths',possible_paths))", "def create_path(self):\n\n partials = []\n partials.append({})\n #print self.trip_id\n\n #this variable is true if we have not yet recorded the first edge of a path\n first_edge = True\n #this variable is false until we hit the midpoint\n hit_midpoint = False\n\n first_lasts = []\n first_lasts.append([0,0])\n matrices = []\n matrices.append([np.zeros((self.graph.rows,self.graph.cols)),0])\n edge_sets = []\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n cur_line = self.line_num\n good_graphs = []\n good_graphs.append(True)\n nodes_visited = []\n nodes_visited.append([])\n #normalized = dg.normalize(self.graph.lines[cur_line])\n normalized = normalize_simple(self.graph.lines[cur_line])\n matrices_index = 0\n prev_coords = (-1,-1)\n while normalized[0] == self.trip_id:\n lat = normalized[1]\n lon = normalized[2]\n coords = self.graph.gps_to_coords(lat,lon)\n node = self.graph.coords_to_node(coords[0],coords[1])\n\n if prev_coords == (-1,-1) and coords[0] != -1:\n first_lasts[matrices_index][0] = node\n\n if coords[0] == -1 and prev_coords[0] != -1:\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = prev_node\n\n if prev_coords != (-1,-1) and coords[0] != -1 and coords != prev_coords:\n edge_num = self.graph.edge_num(prev_coords[0],prev_coords[1],coords[0],coords[1])\n if edge_num == -1:\n good_graphs[matrices_index] = False\n else:\n edge_sets[matrices_index][edge_num] = 1\n if edge_num in partials[matrices_index] and partials[matrices_index][edge_num] == 0:\n del partials[matrices_index][edge_num]\n if not hit_midpoint:\n if first_edge:\n above = (prev_coords[0]-1,prev_coords[1])\n below = (prev_coords[0]+1,prev_coords[1])\n left = (prev_coords[0],prev_coords[1]-1)\n right = (prev_coords[0],prev_coords[1]+1)\n for next_coords in (above,below,left,right):\n other_edge = self.graph.edge_num(prev_coords[0],prev_coords[1],next_coords[0],next_coords[1])\n if other_edge != -1:\n partials[matrices_index][other_edge] = 0\n first_edge = False\n if self.graph.coords_to_node(prev_coords[0],prev_coords[1]) == self.midpoint:\n hit_midpoint = True\n partials[matrices_index][edge_num] = 1\n if self.graph.coords_to_node(coords[0],coords[1]) == self.midpoint:\n hit_midpoint = True\n\n\n\n if coords[0] == -1:\n matrices.append([np.zeros((self.graph.rows,self.graph.cols)),0])\n first_lasts.append([0,0])\n edge_sets.append([0 for i in range(self.graph.num_edges)])\n good_graphs.append(True)\n nodes_visited.append([])\n matrices_index += 1\n partials.append({})\n hit_midpoint = False\n first_edge = True\n \n elif coords[0] < self.graph.rows and coords[1] < self.graph.cols and not matrices[matrices_index][0][coords[0]][coords[1]]:\n matrices[matrices_index][1] += 1\n matrices[matrices_index][0][coords[0]][coords[1]] = 1\n nodes_visited[matrices_index].append(coords)\n\n prev_coords = coords\n\n cur_line += 1\n if cur_line == len(self.graph.lines):\n break\n #normalized = dg.normalize(self.graph.lines[cur_line])\n normalized = normalize_simple(self.graph.lines[cur_line])\n\n prev_node = self.graph.coords_to_node(prev_coords[0],prev_coords[1])\n first_lasts[matrices_index][1] = 
prev_node\n self.next_line = cur_line\n best_index = 0\n best_score = 0\n for matrix_index in range(len(matrices)):\n if matrices[matrix_index][1] > best_score:\n best_score = matrices[matrix_index][1]\n best_index = matrix_index\n\n for coords in nodes_visited[best_index]:\n self.graph.node_visit(self.trip_id,coords)\n \n\n if self.trip_id not in self.graph.trip_id2line_num:\n #if first_lasts[best_index] == [28,5]:\n # print \"a to b: %d\" % self.trip_id\n self.graph.first_last2trip_ids[tuple(first_lasts[best_index])].append(self.trip_id)\n\n return matrices[best_index][0],edge_sets[best_index],good_graphs[best_index],partials[best_index]", "def possible(self):\n return [tuple(path) for path in nx.all_shortest_paths(self._gpm.Graph, source=self.source, target=self.target)]", "def build_path(start, end):\n a = hierarchy.index(start)\n b = hierarchy.index(end)\n if a == b:\n return []\n elif a < b:\n return hierarchy[a + 1 : b + 1]\n return list(reversed(hierarchy[b:a]))", "def path(self):\n\t\tnode, path_back = self, []\n\t\twhile node:\n\t\t\tpath_back.append(node)\n\t\t\tnode = node.parent\n\t\treturn list(reversed(path_back))", "def bfs_paths(self, start: str, goal: str) -> List[Path]:\n queue = [(start, [start])]\n while queue:\n (node, path) = queue.pop(0)\n if node not in self.graph:\n yield []\n for _next in set(self.graph[node]) - set(path):\n if _next == goal:\n yield path + [_next]\n elif _next in self.graph:\n queue.append((_next, path + [_next]))", "def expand_paths_by_nodes(self, paths):\n paths_formatted = set()\n # Expand each path\n for path in paths:\n if len(path) < 2:\n continue\n expanded_paths = set()\n if self.include_entity:\n relations_for_each_step = [[path[0]]]\n else:\n relations_for_each_step = []\n for index in range(1, len(path)):\n node1 = path[index-1]\n node2 = path[index]\n if (node1, node2) in self.pair_to_relations:\n relations = self.pair_to_relations[(node1, node2)]\n else:\n print(node1, node2)\n relations_for_each_step.append(relations)\n if self.include_entity:\n relations_for_each_step.append([node2])\n expanded_paths.update(list(itertools.product(*relations_for_each_step)))\n paths_formatted.update(expanded_paths)\n return paths_formatted", "def path(self):\r\n node, p = self, []\r\n while node:\r\n p.append(node)\r\n node = node.parent\r\n yield from reversed(p)", "def find_all_path(self, start, end, path=[]):\n path = path+[start]\n if start == end:\n return path\n paths = []\n for node in self.graph[start]:\n if node not in path:\n newpaths = self.find_path(node, end, path)\n paths.append(newpaths)\n return paths", "def closed_paths(entities, vertices):\n # get a networkx graph of entities\n graph, closed = vertex_graph(entities)\n # add entities that are closed as single- entity paths\n entity_paths = np.reshape(closed, (-1, 1)).tolist()\n # look for cycles in the graph, or closed loops\n vertex_paths = nx.cycles.cycle_basis(graph)\n\n # loop through every vertex cycle\n for vertex_path in vertex_paths:\n # a path has no length if it has fewer than 2 vertices\n if len(vertex_path) < 2:\n continue\n # convert vertex indices to entity indices\n entity_paths.append(\n vertex_to_entity_path(vertex_path,\n graph,\n entities,\n vertices))\n\n return entity_paths", "def path(self):\n node, return_path = self, []\n while node:\n # Add the nodes in reverse order to a list until you reach the\n # root parent node which will terminate the loop\n return_path.append(node)\n node = node.parent\n # Reverse the list to get the proper path back\n return 
list(reversed(return_path))", "def constructShortestPath(self):\r\n sp = []\r\n v = self.t\r\n while self.preds[v]: # is not None\r\n sp.append(v)\r\n v = self.preds[v]\r\n sp.append(self.s) # source\r\n sp.reverse() # to have the path from source to dest and not t to s\r\n return sp, self.graph.getCoords(sp)", "def decompose_paths(self):\n if self.child_nodes == {}:\n return []\n\n import numpy as np\n\n def decompose_paths_rec(node_inner, path):\n \"\"\"\n This function does the recursive create_path of the decomposition\n :param node_inner:\n :param path:\n \"\"\"\n if node_inner.is_leaf():\n path = np.append(path, str(node_inner.value))\n return path[None]\n else:\n paths = np.array([])\n for edge_name in node_inner.child_nodes:\n new_path = np.append(path, str(edge_name))\n paths = np.append(paths, decompose_paths_rec(node_inner.child_nodes[edge_name], new_path))\n return paths\n\n decomposition = decompose_paths_rec(self, np.array([]))\n return decomposition.reshape((decomposition.shape[0]/(self.d+1), self.d+1))", "def calculate_paths(topology):\n nodes = topology['nodes']\n edges = topology['links']\n\n dist = [[len(nodes) + 1 for x in range(len(nodes))] for y in range(len(nodes))]\n paths = [[[] for x in range(len(nodes))] for y in range(len(nodes))]\n\n for e in edges.values():\n s, d = int(e['source']), int(e['target'])\n dist[s][d] = dist[d][s] = 1\n paths[s][d] = [e['id']]\n paths[d][s] = [e['id']]\n\n for k in range(len(nodes)):\n for i in range(len(nodes)):\n for j in range(len(nodes)):\n if dist[i][k] + dist[k][j] < dist[i][j]:\n dist[i][j] = dist[i][k] + dist[k][j]\n paths[i][j] = paths[i][k] + paths[k][j]\n return paths", "def find_all_paths(graph, start, end, path=[]):\n path = path + [start]\n if start == end:\n return [path]\n paths = []\n for node in graph[start]:\n newpaths = find_all_paths(graph, node, end, path)\n paths += newpaths\n return paths", "def _path(from_object, to_object):\n\n if from_object._root != to_object._root:\n raise ValueError(\"No connecting path found between \" +\n str(from_object) + \" and \" + str(to_object))\n\n other_path = []\n obj = to_object\n while obj._parent is not None:\n other_path.append(obj)\n obj = obj._parent\n other_path.append(obj)\n object_set = set(other_path)\n from_path = []\n obj = from_object\n while obj not in object_set:\n from_path.append(obj)\n obj = obj._parent\n index = len(from_path)\n i = other_path.index(obj)\n while i >= 0:\n from_path.append(other_path[i])\n i -= 1\n return index, from_path", "def path(self):\n node, path_back = self, []\n while node:\n path_back.append(node)\n node = node.parent\n return list(reversed(path_back))" ]
[ "0.66736597", "0.65876526", "0.64044726", "0.6344763", "0.6287056", "0.6267099", "0.6243408", "0.6209103", "0.61667985", "0.6163704", "0.61539465", "0.6153504", "0.61441976", "0.61239386", "0.6118913", "0.611599", "0.6104713", "0.6080712", "0.60661316", "0.6046239", "0.60459167", "0.6019914", "0.60191035", "0.60152674", "0.6003547", "0.5991242", "0.59912205", "0.5976045", "0.59692216", "0.59678894" ]
0.68498194
0
Archive a GIT project and upload it to Dash.
def deploy_project(name, apikey, changed_files=None, repo=None, branch='master'): zbuff = StringIO() if changed_files is not None: changed_files = list(set(changed_files) | REQUIRED_FILES) _archive_project(name, zbuff, changed_files, repo, branch) zbuff.reset() payload = {'apikey': apikey, 'project': name} req = requests.post( DASH_API_URL + 'as/import.json?version=portia', files=[('archive', ('archive', zbuff, 'application/zip'))], params=payload ) if req.status_code == 200: project_url = DASH_API_URL.rsplit('/', 2)[0] + '/p/' + name return { 'status': 'ok', 'schedule_url': project_url } else: raise DeployError('Deploy to Dash failed: %s' % req.text)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __gitCreateArchive(self):\n self.vcs.gitCreateArchive(self.project.getProjectPath())", "def upload_tar_from_git():\n require(\"release\", provided_by=[deploy])\n tree = prompt(\"Please enter a branch or SHA1 to deploy\", default=\"master\")\n local(\"git archive --format=tar %s | gzip > %s.tar.gz\" % (tree, env['release']))\n sudo(\"mkdir %(path)s/releases/%(release)s\" % env)\n put(\"%(release)s.tar.gz\" % env, \"%(path)s/packages/\" % env, use_sudo=True)\n sudo(\"cd %(path)s/releases/%(release)s && tar zxf ../../packages/%(release)s.tar.gz\" % env)\n local(\"rm %(release)s.tar.gz\" % env)", "def archive_projectbuild(projectbuild, archive):\n transport = get_transport_for_projectbuild(projectbuild, archive)\n transport.archive()", "def _archive_repository(\n owner: str, project_name: str, secret_token: str\n) -> Tuple[bool, str]:\n project_settings = {\"archived\": \"true\"}\n\n headers = {\n \"Authorization\": f\"token {secret_token}\",\n }\n\n url = f\"https://{REST_HOST}/repos/{owner}/{project_name}\"\n\n response = patch(url, json=project_settings, headers=headers, verify=VERIFY_CERT)\n return response.ok, (\n f\"Status: {response.status_code}. \" f'Error: \"{response.text}\".'\n )", "def archive(project, filename, pack_envs=False):\n return archiver._archive_project(project, filename, pack_envs)", "def deploy():\n\n project_dir = '/home/gastosabertos/gastos_abertos_website'\n with cd(project_dir):\n local('tar -cvzf build.tar.gz build')\n run('cp -r build build-old')\n put('build.tar.gz', '.')\n run('tar -xvf build.tar.gz')", "def deploy():\n build()\n collect()\n commit()\n push()", "def sync(args: argparse.Namespace) -> None:\n\tdel args\n\trepo_path = _find_repo()\n\tmanifest_file = os.path.join(repo_path, MANIFEST_DIRECTORY, storas.manifest.DEFAULT_MANIFEST_FILE)\n\tmanifest = storas.manifest.load(manifest_file)\n\tfor project in manifest.projects:\n\t\tfull_path = os.path.join(repo_path, \"..\", project.path)\n\t\tremote = project.remote\n\t\tfull_fetch_url = urllib.parse.urljoin(remote.fetch_host, project.name)\n\t\tif not os.path.exists(full_path):\n\t\t\tos.makedirs(full_path, exist_ok=True)\n\t\t\tLOGGER.debug(\"Created '%s'\", full_path)\n\t\t\t_run_git([\"clone\", \"-b\", project.revision, full_fetch_url], cwd=full_path)", "def upload_tar_from_git(path):\n require('release', provided_by=[prod])\n require('whole_path', provided_by=[prod])\n require('branch', provided_by=[prod])\n local('git checkout %s' % (env.branch))\n local('git archive --format=tar %s | gzip > %s.tar.gz' % (env.branch, env.release))\n sudo('mkdir -p %s' % (path))\n put('%s.tar.gz' % (env.release), '/tmp/', mode=0755)\n sudo('mv /tmp/%s.tar.gz %s/packages/' % (env.release, env.code_root))\n sudo('cd %s && tar zxf ../../../packages/%s.tar.gz' % (env.whole_path, env.release))\n local('rm %s.tar.gz' % (env.release))\n sudo('rm %s/packages/%s.tar.gz' % (env.code_root, env.release))", "def deploy():\n build()\n rsync_project(\n local_dir=os.path.abspath(env.config['destination']) + \"/\",\n remote_dir=env.remote_dir,\n delete=True,\n extra_opts='--exclude=\".DS_Store\"',\n )", "def git_project(soup, github_user, github_pass, github_repo, github_name):\n giturl = 'https://{user}:{password}@github.com/{user}/{repo}.git'.format(\n user=github_user, password=github_pass, repo=github_repo\n )\n oldcwd = os.getcwd()\n tmpdir = tempfile.mkdtemp()\n gitdir = os.path.join(tmpdir, github_repo)\n cmd = 'git clone {} {}'.format(shlex.quote(giturl), shlex.quote(gitdir))\n subprocess.run(shlex.split(cmd), 
check=False)\n os.chdir(gitdir)\n rhinoscrape(soup, github_user, github_name)\n cmd = 'git add .'\n subprocess.run(shlex.split(cmd), check=False)\n msg = 'Project committed by Rhino Repo'\n cmd = 'git commit -m {}'.format(shlex.quote(msg))\n subprocess.run(shlex.split(cmd), check=False)\n cmd = 'git push {}'.format(shlex.quote(giturl))\n subprocess.run(shlex.split(cmd), check=False)\n os.chdir(oldcwd)\n shutil.rmtree(tmpdir, ignore_errors=True)", "def publish():\n fab.local(\"env/bin/python setup.py sdist\")\n tar_filename = fab.local(\n \"env/bin/python setup.py --fullname\", capture=True\n )\n dist_filename = \"dist/{}.tar.gz\".format(tar_filename)\n fab.put(dist_filename, PYREPO_DIR)", "def git_archive_all(path, archive_file_name):\n import os\n import tarfile\n\n def ls_files(prefix=''):\n \"\"\"\n Does a `git ls-files` on every git repository (eg: submodules)\n found in the working git repository and returns a list with all the\n filenames returned by each `git ls-files`\n\n --full-name Forces paths to be output relative to the project top\n directory\n --exclude-standard adds standard git exclusions\n (.git/info/exclude, .gitignore, ...)\n \"\"\"\n cmd = 'git ls-files --full-name --exclude-standard'\n raw_files = local(cmd, capture=True)\n files = []\n\n for filename in raw_files.split('\\n'):\n if (os.path.isdir(filename) and\n os.path.exists(os.path.join(filename, '.git'))):\n os.chdir(filename)\n files.extend(ls_files(prefix=filename))\n else:\n files.append(os.path.join(prefix, filename))\n\n return files\n\n cwd = os.getcwd()\n os.chdir(path)\n files = ls_files()\n os.chdir(path)\n project_tar = tarfile.open(archive_file_name, 'w:gz')\n\n for filename in files:\n project_tar.add(filename)\n\n project_tar.close()\n os.chdir(cwd)\n\n print(green('Archive created at %s/%s' % (path, archive_file_name)))", "def pack(**kwargs):\n require('repository')\n #if env.repository.startswith('svn://'):\n if env.repository.type == 'svn':\n execute(svn.pack, **kwargs)\n if env.repository.type == 'git':\n execute(git.pack, **kwargs)\n else:\n abort('Unsupported repository type %s' % env.repository)", "def deploy():\n archive_path = do_pack()\n if archive_path is None:\n print(\"pass\")\n return False\n return do_deploy(archive_path)", "def deploy_django_project(self):\n\n if self.no_files:\n return\n\n local_dir = \"{0}\".format(self.app_dir)\n app_dir = \"{0}\".format(self.app_remote_dir)\n\n if not exists(app_dir):\n mkdir(app_dir)\n\n zip_name = make_zip(local_dir, self.app_name)\n put(zip_name, self.app_remote_dir)\n\n with cd(self.app_remote_dir):\n run(\"unzip -o {0}\".format(zip_name))\n\n os.remove(zip_name)", "def pub_deploy(args, project=\"\", account=\"\", api_key=\"\"):\n base_url, api_key, updated = get_project_connect(\n 'djaodjin',\n base_url=DEFAULT_API_ENDPOINT,\n api_key=api_key)\n project, account, updated = get_project_account(\n project=project, account=account)\n if updated:\n save_config()\n\n api_container_url = \\\n \"%(base_url)s/api/containers/%(organization)s/apps/%(app)s/\" % {\n 'base_url': base_url,\n 'organization': str(account),\n 'app': str(project)}\n data = None\n container_location = args[0] if args else None\n if container_location:\n data = {'location': container_location}\n resp = requests.post(api_container_url, data=data, auth=(api_key, \"\"))\n LOGGER.info(\"POST %s returns %d %s\",\n api_container_url, resp.status_code, resp.text)", "def pack():\n clean_local()\n build()\n copy_json()\n optimize()\n tarball()", "def push(self) -> None:\n\n with 
ImportExtensions(required=True):\n import requests\n\n pkg_path = Path(self.args.path)\n if not pkg_path.exists():\n self.logger.critical(f'`{self.args.path}` is not a valid path!')\n exit(1)\n\n request_headers = self._get_request_header()\n\n try:\n # archive the executor package\n with TimeContext(f'Packaging {self.args.path}', self.logger):\n md5_hash = hashlib.md5()\n bytesio = archive_package(pkg_path)\n content = bytesio.getvalue()\n md5_hash.update(content)\n\n md5_digest = md5_hash.hexdigest()\n\n # upload the archived package\n form_data = {\n 'public': self.args.public if hasattr(self.args, 'public') else False,\n 'private': self.args.private\n if hasattr(self.args, 'private')\n else False,\n 'md5sum': md5_digest,\n 'force': self.args.force,\n 'secret': self.args.secret,\n }\n\n method = 'put' if self.args.force else 'post'\n\n hubble_url = get_hubble_url()\n # upload the archived executor to Jina Hub\n with TimeContext(\n f'Pushing to {hubble_url} ({method.upper()})',\n self.logger,\n ):\n resp = getattr(requests, method)(\n hubble_url,\n files={'file': content},\n data=form_data,\n headers=request_headers,\n )\n\n if 200 <= resp.status_code < 300:\n # TODO: only support single executor now\n image = resp.json()['executors'][0]\n\n uuid8 = image['id']\n secret = image['secret']\n visibility = image['visibility']\n\n info_table = [\n f'\\t🔑 ID:\\t\\t' + colored(f'{uuid8}', 'cyan'),\n f'\\t🔒 Secret:\\t'\n + colored(\n f'{secret}',\n 'cyan',\n )\n + colored(\n ' (👈 Please store this secret carefully, it wont show up again)',\n 'red',\n ),\n f'\\t👀 Visibility:\\t' + colored(f'{visibility}', 'cyan'),\n ]\n\n if 'alias' in image:\n info_table.append(f'\\t📛 Alias:\\t' + colored(image['alias'], 'cyan'))\n\n self.logger.success(f'🎉 Executor `{pkg_path}` is pushed successfully!')\n self.logger.info('\\n' + '\\n'.join(info_table))\n\n usage = (\n f'jinahub://{uuid8}'\n if visibility == 'public'\n else f'jinahub://{uuid8}:{secret}'\n )\n\n self.logger.info(f'You can use it via `uses={usage}` in the Flow/CLI.')\n elif resp.text:\n # NOTE: sometimes resp.text returns empty\n raise Exception(resp.text)\n else:\n resp.raise_for_status()\n except Exception as e: # IO related errors\n self.logger.error(\n f'Error while pushing `{self.args.path}` with session_id={request_headers[\"jinameta-session-id\"]}: '\n f'\\n{e!r}'\n )", "def deploy():\n _confirm_branch()\n \n require('settings', provided_by=[production, staging])\n require('branch', provided_by=[stable, master, branch])\n \n with settings(warn_only=True):\n maintenance_up()\n \n checkout_latest()\n gzip_assets()\n deploy_to_s3()\n maintenance_down()", "def build_and_deploy():\n\n with shell_env(TZ=_get_timezone()):\n _create_output_branch()\n _build_html()\n _git_commit_all()\n _git_push(_get_output_branch())", "def archive(\n self,\n ostream: Union[TextIO, BinaryIO],\n treeish: Optional[str] = None,\n prefix: Optional[str] = None,\n **kwargs: Any,\n ) -> Repo:\n if treeish is None:\n treeish = self.head.commit\n if prefix and \"prefix\" not in kwargs:\n kwargs[\"prefix\"] = prefix\n kwargs[\"output_stream\"] = ostream\n path = kwargs.pop(\"path\", [])\n path = cast(Union[PathLike, List[PathLike], Tuple[PathLike, ...]], path)\n if not isinstance(path, (tuple, list)):\n path = [path]\n # end assure paths is list\n self.git.archive(\"--\", treeish, *path, **kwargs)\n return self", "def deploy():\n build()\n copy()\n install()", "def push_sources():\n ensure_src_dir()\n push_rev = getattr(env, 'push_rev', None)\n if push_rev is None:\n 
push_rev = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n local(\"git tag -a {0} -m \\\"Tagged for release\\\"\".format(push_rev))\n local(\"git push origin master --tags\")\n\n with cd(SRC_DIR):\n run(\"git pull origin master\")\n run(\"git fetch -t\")\n run(\"git checkout {0}\".format(push_rev))", "def upload():\n run('mkdir -p /srv/images/'+env.project_name+'/')\n rsync_project(\n env.project_dir, './',\n exclude=(\n '.git', '.gitignore', '__pycache__', '*.pyc', '.DS_Store', 'environment.yml',\n 'fabfile.py', 'Makefile', '.idea', 'bower_components', 'node_modules',\n '.env.example', 'README.md', 'var'\n ), delete=True)", "def deploy():\n new_archive = do_pack()\n\n if new_archive is None:\n return False\n\n res = do_deploy(new_archive)\n return res", "def push(self):\n out, err, code = self.command( [\"git\", \"push\"], self.directory )", "def upload(self, request, pk=None):\n app = self.get_object()\n deployment = Revision()\n deployment.compressed_archive = request.FILES['file']\n deployment.app = app\n deployment.save()\n app.deploy()\n response = {}\n return Response(response)", "def push ():\n\n tagname = get_tag (comp_versions, 'ACE')\n\n if opts.push:\n if opts.take_action:\n vprint (\"Pushing ACE_TAO\", opts.ace_tao_branch, \"to origin\")\n ex (\"cd $DOC_ROOT/ACE_TAO && git push origin \" + opts.ace_tao_branch)\n\n vprint (\"Pushing tag %s on ACE_TAO\" % (tagname))\n ex (\"cd $DOC_ROOT/ACE_TAO && git push origin tag \" + tagname)\n\n vprint (\"Pushing tag %s on MPC\" % (tagname))\n ex (\"cd $DOC_ROOT/MPC && git push origin tag \" + tagname)\n\n # Push release branches\n latest_branch_helper (push_latest_branch, opts.release_type)\n else:\n vprint (\"Pushing tag %s on ACE_TAO\" % (tagname))\n vprint (\"Pushing tag %s on MPC\" % (tagname))\n print (\"Pushing tags:\\n\")\n print (\"Pushing tag \" + tagname + \"\\n\")", "def archive(ctx, config):\n log.info('Creating archive directory...')\n archive_dir = misc.get_archive_dir(ctx)\n run.wait(\n ctx.cluster.run(\n args=[\n 'install', '-d', '-m0755', '--', archive_dir,\n ],\n wait=False,\n )\n )\n\n try:\n yield\n except Exception:\n # we need to know this below\n set_status(ctx.summary, 'fail')\n raise\n finally:\n passed = get_status(ctx.summary) == 'pass'\n if ctx.archive is not None and \\\n not (ctx.config.get('archive-on-error') and passed):\n log.info('Transferring archived files...')\n logdir = os.path.join(ctx.archive, 'remote')\n if (not os.path.exists(logdir)):\n os.mkdir(logdir)\n for rem in ctx.cluster.remotes.iterkeys():\n path = os.path.join(logdir, rem.shortname)\n misc.pull_directory(rem, archive_dir, path)\n # Check for coredumps and pull binaries\n fetch_binaries_for_coredumps(path, rem)\n\n log.info('Removing archive directory...')\n run.wait(\n ctx.cluster.run(\n args=[\n 'rm',\n '-rf',\n '--',\n archive_dir,\n ],\n wait=False,\n ),\n )" ]
[ "0.75925666", "0.6769014", "0.6449582", "0.6398534", "0.6263004", "0.6191235", "0.6138252", "0.61108285", "0.60195297", "0.60129213", "0.5945884", "0.5934544", "0.59011316", "0.58871365", "0.5878439", "0.586114", "0.58004034", "0.57955575", "0.5776379", "0.5756278", "0.5727968", "0.57231957", "0.5685913", "0.5675735", "0.56729144", "0.56666195", "0.5657752", "0.5652568", "0.56432843", "0.56374943" ]
0.72552186
1
Search existing spider names in a project
def search_spider_names(project, apikey, name=''): payload = {'project': project, 'apikey': apikey, 'spider': name} req = requests.get(DASH_API_URL + 'spiders/list.json', params=payload) if req.status_code == 200: return [s.get('id') for s in req.json().get('spiders', [])] return []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def dorkScanner():\n pysearch.PySearch()\n openfile = open(\"sites.txt\", 'r')\n urls = openfile.read()\n openfile.close()\n return urls", "def _search(self, log, progressbar):\n self._urls = []\n for filename in os.listdir(self._path):\n url = 'file:////' + filename\n self._urls.append(url)\n self._urls.sort()", "def addCrawler(name):\n global allCrawlerNames\n if name == 'scihub':\n allCrawlers.append(ScihubCrawler())\n allCrawlerNames = [ c.name for c in allCrawlers ]", "def search(name):\n try:print(f'Searching for {name}...');os.system(f'python -m pip search {name}')\n except Exception as e:print(\"something went wrong\\n{e}\")", "def go_search(self, driver, pid):\n return [self.search_url(website, pid)]", "def test_search_checkname(self):\n self.assertEquals(self.t['Scrubs'].search('my first')[0]['episodename'], 'My First Day')\n self.assertEquals(self.t['My Name Is Earl'].search('Faked His Own Death')[0]['episodename'], 'Faked His Own Death')", "def query_project(self):\n\n # Find stylesheets.\n found = False\n for filename in self.project.namelist():\n if os.path.basename(filename) == 'styles.xml':\n found = True\n print(filename)\n if not found:\n print(\"not found!\")", "def test_search_project(self):\n title = Project.search_project(\"dee\")\n self.assertTrue(len(title) > 0)", "def search(self, name: str) -> \"Navaids\":\n return self.__class__(\n self.data.query(\n \"description == @name.upper() or name == @name.upper()\"\n )\n )", "def search():\n pass", "def main():\n domain_list = []\n base_url = \"http://localhost:9200/latest-tor/_search?pretty&size=9000&_source=title,domain\"\n keywords_list = ['preteen', 'loli', 'lolita', 'jailbait', 'pthc', 'best cp',\n '\"child porn\"', '\"kid porn\"', '\"child sex\"', '\"cp video\"',\n '\"nude children\"', '\"cp porn\"', '\"free child porn\"', 'kinderporn',\n '\"child rape\"', '\"toddler porn\"', '\"kids videos\"', '\"cp videos\"',\n 'lolilust', '\"pedo porno\"', '\"pedo content\"', 'underage', '\"cp pack\"',\n 'loliporn', 'pedofamily', '\"cp database\"', '\"pedo webcams\"', 'lolitacity']\n '\"xxx child\"', '\"xxx underage\"', '\"young forbidden\"']\n search_terms = []\n for index, term in enumerate(keywords_list):\n search_terms.append(term)\n if len(search_terms) >= 10 or index + 1 == len(keywords_list):\n url = base_url + \"&q=(\" + \" OR \".join(search_terms).replace(\" \", \"%20\") + \")\"\n search(url, domain_list)\n search_terms = []", "def parse_search_page(self, response):\n ###############################################################\n search_name_url_xpath = '//*[@id=\"dnn_dnnLEFTMENU_RadPanel1\"]/ul/li/div/ul/li[2]/a/@href'\n ###############################################################\n search_name_url = response.xpath(search_name_url_xpath).extract_first()\n yield scrapy.Request(response.urljoin(search_name_url), callback = self.parse_search_name_page)", "def spiders(args):\n _projects = lib.get_projects(\n args.target, args.project, username=args.username, password=args.password\n )\n for project in _projects:\n project_spiders = lib.get_spiders(\n args.target, project, username=args.username, password=args.password\n )\n if not args.verbose:\n print(f\"{project}:\")\n if project_spiders:\n print(indent(\"\\n\".join(project_spiders), INDENT_PREFIX))\n else:\n print(INDENT_PREFIX + \"No spiders.\")\n elif project_spiders:\n print(\"\\n\".join(f\"{project} {x}\" for x in project_spiders))", "def __call__(self, *paths):\n\n for item in self.site.items:\n if item.is_page() and item.match(*paths):\n yield item", "def 
search_entries(search):\n _, filenames = default_storage.listdir(\"entries\")\n result = []\n for filename in filenames: \n if filename.endswith(\".md\"):\n nameonly = re.sub(r\"\\.md$\", \"\", filename)\n \n if nameonly.lower() == search.lower():\n #print(\"name only :\", nameonly)\n #print(\"search :\", search)\n return (nameonly)\n elif search.lower() in nameonly.lower():\n result.append(nameonly)\n return(result)", "def test_search_in_name(self, es_with_collector, name_term, matched_company_name):\n CompanyFactory(\n name='whiskers and tabby',\n trading_names=['Maine Coon', 'Egyptian Mau'],\n )\n CompanyFactory(\n name='1a',\n trading_names=['3a', '4a'],\n )\n es_with_collector.flush_and_refresh()\n\n url = reverse('api-v3:search:basic')\n response = self.api_client.get(\n url,\n data={\n 'term': name_term,\n 'entity': 'company',\n },\n )\n\n match = Company.objects.filter(name=matched_company_name).first()\n if match:\n assert response.data['count'] == 1\n assert len(response.data['results']) == 1\n assert response.data['results'][0]['id'] == str(match.id)\n assert [{'count': 1, 'entity': 'company'}] == response.data['aggregations']\n else:\n assert response.data['count'] == 0\n assert len(response.data['results']) == 0", "def search(self, title):\n close_matches = self.get_close_matches_by_title(title)\n count = 0\n for item in self.item_list.values():\n if item.title in close_matches:\n print(item)\n count += 1\n if count == 0:\n print(\"No result found.\")", "def finddocname(string):\r\n for x in doclist:\r\n foundvar = f\"-->Doc name = {x.title()}\"\r\n if x in string:\r\n print(foundvar)\r\n break", "def filter_projects():\n with open('../results/01.crawling/01.project_ci_services.json', 'r') as infile:\n projects = json.load(infile)\n tr_projects = []\n for project, value in projects.items():\n if \"GitHub\" in value or \"Travis\" in value:\n tr_projects.append(project)\n return tr_projects", "def run_search(self, links):\n for s in links:\n self._run_command(\" s \\\"{}\\\" \\n\".format(s))", "def find(self, egg):", "def name_search(self, search):\n if isinstance(search, str):\n name_re = re.compile(search)\n else:\n name_re = search\n matches = [\n entry\n for entry in self\n if entry is not None and name_re.search(entry.name)\n ]\n return matches", "def runSpider(spider, searchterm = None, fullink = None, spiderbotid = -1):\n sclogic.runSpider(spider, searchterm, fullink, spiderbotid)", "def Collection_search_name(C:list, name:str) -> list:\r\n restaurants = []\r\n for r in C:\r\n for dish in r.menu:\r\n if name in dish.name:\r\n restaurants.append(r)\r\n return restaurants", "def get_items_to_find(self):\n self.items_to_find = ['sole', 'farina', 'innaffiatoio']", "def find_with_deps(self, package_names):", "def search(self, term):", "def search(self, *args, **kwargs):", "def __init__(self, name=\"\"):\n super().__init__(\"search\", name)", "def search_service(self, name_filter):\n rs=search_service(name_filter)\n for el in rs:\n print(el)" ]
[ "0.56716776", "0.54362226", "0.54334825", "0.53485537", "0.53356576", "0.53022844", "0.52963454", "0.5276801", "0.5274018", "0.5273827", "0.5265692", "0.5207114", "0.51662695", "0.5152858", "0.514631", "0.512449", "0.5115938", "0.5112054", "0.51114047", "0.51075816", "0.5092334", "0.50902075", "0.5086198", "0.50772893", "0.5060954", "0.50554353", "0.5053591", "0.50406694", "0.5028122", "0.50260806" ]
0.7393245
0
Download a zipped project from Dash.
def _download_project(name, apikey): payload = {'apikey': apikey, 'project': name, 'version': 'portia'} r = requests.get(DASH_API_URL + 'as/project-slybot.zip', params=payload) return r.content
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_data():\n url = 'https://www.dropbox.com/s/h9ubx22ftdkyvd5/ml-latest-small.zip?dl=1'\n urllib.request.urlretrieve(url, 'ml-latest-small.zip')\n zfile = zipfile.ZipFile('ml-latest-small.zip')\n zfile.extractall()\n zfile.close()", "def x_download():\n\t#_loadconfig()\n\tconf = _get_config()\n\t#print conf['xplane']\n\tdownload_url = conf['xplane']['download']\n\tlocal(\"wget -P %s %s\" % (navimport.conf.work_dir(\"/xplane_zips\"), download_url))", "def download_project_archive(request, **kwargs):\n project = kwargs.get(\"project\")\n if request.user.is_authenticated and request.user == project.user:\n filename = project.create_downloadable_archive()\n file_handle = open(filename, \"rb\")\n response = FileResponse(file_handle)\n\n response[\"Content-Length\"] = os.path.getsize(filename)\n response[\n \"Content-Disposition\"\n ] = 'attachment; filename=\"{}.zip\"'.format(project.name)\n\n return response\n else:\n raise PermissionDenied", "def download(self):\n logger.info(f\"downloading project {self}\")\n self.project.storage.download(f\"{self.path}/releasemanifest\", None)\n self.extract()", "def download_zip(self, path: Path) -> Path:\n if not self.url:\n raise ValueError(\"Release must have a valid url to download the zip.\")\n\n with requests.get(self.url, stream=True) as response:\n with open(path, \"wb\") as download_file:\n shutil.copyfileobj(response.raw, download_file)\n\n return path", "def download():\n try:\n cli.run(\n [URL, '--output', TEMP_DIR],\n )\n except SystemExit:\n return None", "def _download(url, outpath=None, dirname=None, branch='master', release=None):\n six.print_('downloading...')\n outfolder = outpath or os.getcwd()\n file, archive_url = get_archive_url(url, branch, release)\n six.print_(archive_url)\n if dirname:\n outfolder = \"{}/{}.zip\".format(outfolder, dirname)\n return file, wget.download(archive_url, out=outfolder)", "def download():\n env_banner()\n\n download_data = Download()\n download_data()\n click.echo('Download done.')", "def download():\n base_loc = DATA_DIR + '/raw/human_activity'\n loc = base_loc + '/human_activity.zip'\n if os.path.exists(loc):\n print('Path already exists at {}. 
If you wish to re-download you must delete this folder.'.format(loc))\n return\n if not os.path.exists(base_loc):\n os.mkdir(base_loc)\n\n url = 'https://archive.ics.uci.edu/ml/machine-learning-databases/00341/HAPT%20Data%20Set.zip'\n urllib.request.urlretrieve(url, loc)\n\n with zipfile.ZipFile(loc, 'r') as zip_ref:\n zip_ref.extractall(base_loc)", "def download_dependency_url(name, url, temp_path, build_path, config, zip=True):\n parsed = urlparse(url)\n fn = os.path.basename(parsed.path)\n target_name = os.path.join(temp_path, fn)\n logger.info(f\"Downloading {url} to {target_name}\")\n\n download_file(url, target_name)\n\n if zip:\n with zipfile.ZipFile(target_name, \"r\") as z:\n z.extractall(build_path)\n else:\n shutil.copy(target_name, os.path.join(build_path, \"GameData\"))", "def download(uri: str) -> None:\n logger = logging.getLogger(__name__)\n logger.info('Download the dataset')\n\n # create destination dirs\n destination = project_dir / 'data' / 'raw'\n destination.mkdir(exist_ok=True, parents=True)\n\n # download the file\n urllib.request.urlretrieve(uri, destination / \"original.zip\")", "def download():\n response = requests.get(URL, stream=True)\n\n file = open(FILE_NAME, 'wb')\n file.write(response.content)\n\n with zipfile.ZipFile(FILE_NAME, 'r') as zip_ref:\n zip_ref.extractall()\n\n file.close()\n os.remove(FILE_NAME)", "def _Download( self ):\n self._DownloadPipe += PackageUtil.ExecuteSimpleCommand( \"git\", [\"clone\", \"[email protected]:mastbaum/avalanche.git\", self.GetInstallPath()], None, os.getcwd() )\n return", "def fetch_zenodo(self):\n\n # retrieve content from URL\n try:\n logging.info(f\"Downloading example data from {self.url}\")\n r = requests.get(self.url, stream=True)\n with io.BytesIO() as stream:\n with tqdm.wrapattr(\n stream,\n 'write',\n file=sys.stdout,\n miniters=1,\n desc=self.url,\n total=int(r.headers.get('content-length', 0))\n ) as file:\n for chunk in r.iter_content(chunk_size=4096):\n file.write(chunk)\n with zipfile.ZipFile(stream) as zipped:\n # extract each file in the zipped dir to the project\n for f in zipped.namelist():\n logging.info(\"Unzipped: {}\".format(os.path.join(self.destination, f)))\n zipped.extract(f, self.destination)\n\n logging.info(\"Download and install complete.\")\n\n self.close_logger()\n\n except requests.exceptions.MissingSchema:\n msg = f\"Unable to download data from {self.url}\"\n logging.exception(msg)\n self.close_logger()\n raise", "def download(self):\n cmd = mccli() + \" d f \" + self.localpath + \" -p \" + self.project.name\n \n set_cli_remote(self.project.remote)\n \n child = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n out, err = child.communicate()\n return CLIResult(out, err, child.returncode)", "def _download(self):\n self._system.download(\"http://geant4.web.cern.ch/geant4/support/source/\" + self._tar_name)", "def download(project, unpack=True, project_dir=None, parent_dir=None, site=None, username=None, token=None):\n download_status = client._download(project,\n project_dir=project_dir,\n parent_dir=parent_dir,\n site=site,\n username=username,\n token=token)\n if unpack and download_status:\n unpack_status = unarchive(download_status.filename, project_dir=project_dir, parent_dir=parent_dir)\n if unpack_status:\n print(unpack_status.status_description)\n return download_status", "def download():\n raise NotImplementedError", "def _download_from_web(*, ds_name: str, ds_path: Path):\n import cgi\n import zipfile\n import httpx\n from tqdm import tqdm\n\n url 
= DATASET_OPTIONS[ds_name]['web']\n if ds_path.exists():\n print('Dataset directory already exists; remove it if you wish to '\n 're-download the dataset')\n return\n\n ds_path.mkdir(parents=True, exist_ok=True)\n\n with httpx.Client() as client:\n with client.stream('GET', url=url) as response:\n if not response.is_error:\n pass # All good!\n else:\n raise RuntimeError(\n f'Error {response.status_code} when trying '\n f'to download {url}')\n\n\n header = response.headers['content-disposition']\n _, params = cgi.parse_header(header)\n # where to store the archive\n outfile = ds_path / params['filename']\n remote_file_size = int(response.headers['content-length'])\n\n with open(outfile, mode='wb') as f:\n with tqdm(desc=params['filename'], initial=0,\n total=remote_file_size, unit='B',\n unit_scale=True, unit_divisor=1024,\n leave=False) as progress:\n num_bytes_downloaded = response.num_bytes_downloaded\n\n for chunk in response.iter_bytes():\n f.write(chunk)\n progress.update(response.num_bytes_downloaded -\n num_bytes_downloaded)\n num_bytes_downloaded = (response\n .num_bytes_downloaded)\n\n assert outfile.suffix == '.zip'\n\n with zipfile.ZipFile(outfile) as zip:\n for zip_info in zip.infolist():\n path_in_zip = Path(zip_info.filename)\n # omit top-level directory from Zip archive\n target_path = str(Path(*path_in_zip.parts[1:]))\n if str(target_path) in ('.', '..'):\n continue\n if zip_info.filename.endswith('/'):\n (ds_path / target_path).mkdir(parents=True, exist_ok=True)\n continue\n zip_info.filename = target_path\n print(f'Extracting: {target_path}')\n zip.extract(zip_info, ds_path)\n\n outfile.unlink()", "def download():\n try:\n response = send_from_directory(\n app.config.get(\"DATA_DIR\"), \"whiteboard.zip\", as_attachment=True\n )\n\n # change headers to stop browser from delivering cached version\n response.headers[\"Last-Modified\"] = datetime.now()\n response.headers[\n \"Cache-Control\"\n ] = \"no-store, no-cache, must-revalidate, post-check=0, pre-check=0, max-age=0\"\n response.headers[\"Pragma\"] = \"no-cache\"\n response.headers[\"Expires\"] = \"-1\"\n\n return response\n\n except:\n return traceback.format_exc()", "def download(urls, dest_folder):\n pass", "def download(ctx: click.Context, **kwargs):\n root_commands.cmd_download(ctx.obj, **kwargs)", "def _download(self):\n self._system.download_file(\"http://curl.haxx.se/download/\" + self._tar_name)", "def download(url, to):\n filename = url.rstrip('/').split('/')[-1] + '.zip'\n r = requests.get(url, stream=True)\n\n outpath = os.path.join(to, filename)\n\n with open(outpath, 'wb') as fd:\n for chunk in r.iter_content(1024 * 1024):\n fd.write(chunk)\n\n return outpath", "def download_and_unzip_dataset(url, path):\n dl = urllib.urlretrieve(url)\n zf = zipfile.ZipFile(dl[0])\n zf.extractall(path)\n return zf", "def download_data():\n\n if not os.path.exists(zipfile_path):\n print(f'Downloading {config.download_url} to {zipfile_path}')\n urlretrieve(config.download_url, zipfile_path)\n print(f'Successfully downloaded {zipfile_path}')\n\n zip_ref = ZipFile(zipfile_path, 'r')\n zip_ref.extractall(config.raw_data_dir)\n zip_ref.close()\n\n os.rename(f\"{config.raw_data_dir}/cornell movie-dialogs corpus\", extracted_dir)", "def download_dataset():\n \n ID = \"1-3_oB5iSF-c_V65-uSdUlo024NzlgSYZ\"\n script1 = f\"\"\"\n wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 
'https://docs.google.com/uc?export=download&id='{ID} -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=\"{ID} -O Data.zip && rm -rf /tmp/cookies.txt\n \"\"\"\n script2 = \"\"\"unzip Data.zip\"\"\"\n\n os.system(script1)\n os.system(script2)", "def download_archive(self):\n\n def time_convert(structure):\n \"\"\"\n :param structure: tuple representation of time\n :return: GitHub archive time\n \"\"\"\n \n \n join_number_to_zero = lambda number: (\"\" if number > 9 else \"0\") + str(number)\n\n return \"%s-%s-%s-%s\" % (\n structure.tm_year, join_number_to_zero(structure.tm_mon), join_number_to_zero(structure.tm_mday),\n structure.tm_hour)\n\n current_time = self.get_time()\n self.logger.debug(__name__ + \": \" + \"current time: \" + str(gmtime(current_time)))\n\n difference = -25200\n #timezone difference in seconds between GMT and west coast of USA\n\n downloading_time = int(timegm(self.config[\"last_connection_time\"])) + 3600\n self.logger.debug(__name__ + \": \" + \"downloading time: \" + str(gmtime(downloading_time)))\n\n if downloading_time > current_time - 7200:\n self.logger.info(__name__ + \": \" + \"unable to download file (time limiting).\")\n return\n\n downloading_time += difference\n\n json_file_name = self.download_file(time_convert(gmtime(downloading_time)))\n\n self.config[\"last_connection_time\"] = gmtime(downloading_time - difference)\n self.logger.debug(__name__ + \": \" + \"last_connection_time: \" + str(self.config[\"last_connection_time\"]))\n\n return json_file_name", "def unzip() -> None:\n logger = logging.getLogger(__name__)\n logger.info('Download the dataset')\n\n # define the destination\n destination = project_dir / 'data' / 'raw'\n\n # extract zip\n zip_file = ZipFile(destination / \"original.zip\")\n zip_file.extractall(destination)", "def download():\n\treturn response.download(request, db)" ]
[ "0.6970031", "0.6887985", "0.68189776", "0.681829", "0.6720921", "0.66710734", "0.65784806", "0.65546376", "0.6551284", "0.6536828", "0.64772105", "0.63660634", "0.63608265", "0.635872", "0.634232", "0.63057834", "0.630051", "0.6292988", "0.6290236", "0.62840354", "0.62497574", "0.6212216", "0.61953944", "0.61892396", "0.61676174", "0.6166975", "0.615335", "0.6149776", "0.61192924", "0.6110998" ]
0.7049751
0
Convert to front facing coordinates
def get_front_facing_xz(self): yaw_radian = math.radians(self.cur_rotation) return cam.step * math.sin(yaw_radian) * math.cos(0), cam.step * math.cos( yaw_radian) * math.cos(0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def frontFace(self):\n\n if not self.threedee:\n return gl.GL_CCW\n\n # Only looking at the mesh -> display\n # transform, thus we are assuming that\n # the MVP matrix does not have any\n # negative scales.\n xform = self.opts.getTransform('mesh', 'display')\n\n if npla.det(xform) > 0: return gl.GL_CCW\n else: return gl.GL_CW", "def _find_front(self):\n self.front = (laplace(self.working_mask) > 0).astype('uint8')\n # TODO: check if scipy's laplace filter is faster than scikit's", "def pareto_frontier(self) -> Tuple[Tensor, Tensor]:\n raise NotImplementedError(\"Pareto frontier not yet implemented.\")", "def get_forehead(self):\n\t\tx,y,w,h = self.get_face()\n\t\t\n\t\tx += w * self.fh_x\n\t\ty += h * self.fh_y\n\t\tw *= self.fh_w\n\t\th *= self.fh_h\n\t\t\n\t\tx -= (w / 2.0)\n\t\ty -= (h / 2.0)\n\t\t\n\t\treturn tuple(map(int, (x,y,w,h)))", "def front(self):\n return _osgAnimation.vectorFloatKeyframe_front(self)", "def startface(self):\n self.fan = (self.position.x,self.position.y,self.position.z)", "def front(self):\n return _osgAnimation.vectorMatrixKeyframe_front(self)", "def rotateXOut(self):\n MV = self.MV\n MV[:3, 2] = 1, 0, 0 # 3rd col is normal vector, make it point along x axis\n # set top left and top middle values to zero:\n MV[0, 0] = 0\n MV[0, 1] = 0\n b = MV[2, 0] # grab bottom left value\n a = np.sqrt(1 - b**2) # calc new complementary value to get normalized vectors\n #if MV[1, 0] < 0:\n # a = -a # keep a -ve, reduce jumping around of axes\n MV[1, 0] = a\n MV[2, 1] = a\n MV[1, 1] = -b # needs to be -ve of MV[2, 0]\n self.MV = MV", "def rotateXOut(self):\n MV = self.MV\n MV[:3, 2] = 1, 0, 0 # 3rd col is normal vector, make it point along x axis\n # set top left and top middle values to zero:\n MV[0, 0] = 0\n MV[0, 1] = 0\n b = MV[2, 0] # grab bottom left value\n a = np.sqrt(1 - b**2) # calc new complementary value to get normalized vectors\n #if MV[1, 0] < 0:\n # a = -a # keep a -ve, reduce jumping around of axes\n MV[1, 0] = a\n MV[2, 1] = a\n MV[1, 1] = -b # needs to be -ve of MV[2, 0]\n self.MV = MV", "def isotropic_correction_front(self):\n return self.cartesian_map_array(self.IsotropicCorrection(self,'front'))", "def front(self):\n return _osgAnimation.vectorQuatKeyframe_front(self)", "def facing(self) -> Union[int, float]:\n return self.proto.facing", "def screenToCamera(self,x,y):\n #self.x = x\n #self.y = y\n new_x = x / (self.surf.get_width() - 1) - 0.5\n #-(new_x)\n new_y = y / (self.surf.get_height() - 1)\n new_y = (1.0 - cy) - 0.5\n new_z = -self.camNear\n formula = math3dsol.VectorN((new_x,new_y,new_z))\n return formula\n\n # FINISH ME!!!", "def initialCoordinates():\r\n return (-250,-250)", "def moveFront(self, face, l):\n nlist = [[x[i] for x in face] for i in range(len(face[0]))]\n for row in range(self.size):\n for col in range(self.size):\n if col >= int((self.size) / 2):\n break\n else:\n buffer_ = nlist[row][col]\n nlist[row][col] = nlist[row][self.size - 1 - col]\n nlist[row][self.size - 1 - col] = buffer_\n if l == 'f':\n self.front = nlist\n elif l == 'u':\n self.up = nlist\n elif l == 'd':\n self.down = nlist\n elif l == 'l':\n self.left = nlist\n elif l == 'r':\n self.right = nlist\n elif l == 'b':\n self.back = nlist", "def near_clipping_face(self):\n pln = self.tripod.plane\n l, r, b, t, n, f = self.body.dim\n face = gt.Plin((l, b, -n), (r, b, -n), (r, t, -n), (l, t, -n))\n return pln.TM * face", "def xyz2fxy(x,y,z):\n f,X,Y = xyz2facestereo(x,y,z)\n X,Y = stereo2tansquare(X,Y)\n return np.asarray(f,int),np.asfarray(X),np.asfarray(Y)", "def 
front(self):\n return _osgAnimation.VertexList_front(self)", "def getFrontCoord(self, colour, startCoord, rowDirection, columnDirection, arrangement, isLine, points):\r\n print \"Doing Front\"\r\n # We create a list to hold the front coordinates\r\n meshFrontLocation = []\r\n\r\n # We create a mechanism that triggers when there is infinite loop\r\n CountLoop = 0\r\n\r\n # Loop front view till no intended colour is detected\r\n while True:\r\n # Trigger mechanism when there is infinite loop\r\n CountLoop += 1\r\n if CountLoop > 10000:\r\n print \"CountLoop is more then 10000, mechanism to prevent infinite loop during get3DCoord activated\"\r\n break\r\n\r\n # Detect the colour pixel\r\n PixelDetected = self.frontImage.detectColourPixel(colour, startCoord, rowDirection, columnDirection)\r\n\r\n # If pixel is detected\r\n if len(PixelDetected) != 0:\r\n # traceLine(self, colour, StartCoord, rowScan, columnScan)\r\n CoordList, startCoord = self.frontImage.traceLine(colour, PixelDetected, rowDirection, columnDirection)\r\n # We rearrange to coordlist to a certain arrangement so we can get the same starting point in front and side\r\n CoordList = self.rearrange(CoordList, arrangement)\r\n\r\n # We check if it is a line or not. If it is a line, we half the coordinates\r\n if isLine == 1:\r\n CoordList = CoordList[:len(CoordList) / 2]\r\n # We then take note of a certain number of points in the CoordList\r\n\r\n\r\n stride = len(CoordList) / float(points)\r\n\r\n # We add the coordinates to the meshFrontLocation list\r\n #Safety mechanism for infinite loop\r\n loopTrigger = 0\r\n currentIndex = 0.0\r\n while loopTrigger < points:\r\n loopTrigger +=1\r\n if loopTrigger > 10000:\r\n print \"Infinite loop in front coord scanning stride part!!\"\r\n break\r\n i = int(currentIndex)\r\n meshFrontLocation.append([CoordList[i][0], CoordList[i][1]])\r\n currentIndex+=stride\r\n\r\n # If no pixel is detected\r\n else:\r\n break\r\n\r\n return meshFrontLocation", "def translateToOriginXform(self):\n return np.array([[1, 0, 0, -self.eye[0]],\n [0, 1, 0, -self.eye[1]],\n [0, 0, 1, -self.eye[2]],\n [0, 0, 0, 1]])", "def front(self):\n return _osgAnimation.vectorVec3Keyframe_front(self)", "def float_to_front(qtile):\n for window in qtile.current_group.windows:\n if window.floating:\n window.cmd_bring_to_front()", "def origin(self):\r\n\r\n return self.ox, self.oy, self.oz", "def GetSurfaceConversion(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeConvertToBezier_GetSurfaceConversion(self, *args)", "def kinect_transform(self, x, y, z):\n xposition = x\n yposition = y\n zposition = z\n\n return zposition, xposition, yposition", "def position_to_Fourier(self):\n #TODO Try to do it with FFT \n U = self.alphas @ self.positions\n \n return U", "def forward_vector(self):\n return pm.datatypes.Vector(0, 0, 1).rotateBy(self.rotation)", "def front(self):\n return _osgAnimation.vectorVec4Keyframe_front(self)", "def front(self):\n return _osgAnimation.vectorVec2Keyframe_front(self)", "def transformToOrigin(self):\n return Transform.shiftOrigin(self.point, self.head)" ]
[ "0.6466102", "0.5958782", "0.59478974", "0.59458196", "0.5798166", "0.57393557", "0.5688547", "0.56672025", "0.56672025", "0.56627524", "0.5614368", "0.5602697", "0.5566901", "0.5562923", "0.55597514", "0.5549741", "0.55102384", "0.55058265", "0.54947865", "0.54475546", "0.5416932", "0.5410108", "0.54082084", "0.5404698", "0.53981113", "0.53310734", "0.5323869", "0.53236276", "0.53093594", "0.5273548" ]
0.7374416
0
Called by base init, after class change or format text change
def initFormat(self): pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def init_text(self):\n d = self.declaration\n if d.text:\n self.set_text(d.text)\n if d.text_color:\n self.set_text_color(d.text_color)\n if d.text_alignment:\n self.set_text_alignment(d.text_alignment)\n if d.font_family or d.text_size:\n self.refresh_font()\n if hasattr(d, 'max_lines') and d.max_lines:\n self.set_max_lines(d.max_lines)", "def initFormat(self):\n self.formatList = self.splitText(self.format)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, name, attrs={}):\n TextFormat.__init__(self, name, attrs)", "def __init__(self, text):\n\n self.text = text", "def __init__(self, text):\n self.text = text", "def __init__(self, text):\n self.text = text", "def __init__(self):\n self.text = ''", "def set_text(self):\n pass", "def post_init(self):\n\t\tpass", "def init_widget(self):\n super(UiKitTextView, self).init_widget()\n self.init_text()", "def __init__(self,\n text: str) -> None:\n\n super().__init__(text)", "def initFormat(self):\n self.html = True", "def initFormat(self):\n self.html = True", "def initFormat(self):\n self.formatList = []", "def _post_init(self):\n pass", "def __post_init__(self):\n pass", "def initWidgets(self):\n self.lambdtext.setText(str(self.lambd))\n self.ptext.setText(str(self.p))", "def done_adding_strings(self):\n #placeholder in case there's some additional init we need to do.\n pass", "def done_adding_strings(self):\n #placeholder in case there's some additional init we need to do.\n pass", "def _init_display(self):\n raise NotImplementedError", "def __init__(self):\n super(Command, self).__init__()\n self.style.TITLE = self.style.SQL_FIELD\n self.style.STEP = self.style.SQL_COLTYPE\n self.style.ITEM = self.style.HTTP_INFO\n disconnect_objectapp_signals()", "def __init__(self, font='mediumbold'):\n\tself.set_font(font)" ]
[ "0.70883477", "0.6957401", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6811701", "0.6801035", "0.67764556", "0.67764556", "0.6772573", "0.67218834", "0.6665987", "0.6530844", "0.6495981", "0.6494592", "0.6494592", "0.6490198", "0.6401653", "0.6355695", "0.63224435", "0.627716", "0.627716", "0.62600374", "0.6241324", "0.6241043" ]
0.7095915
0
Change this field's type to newType with default format
def changeType(self, newType):
    self.__class__ = globals()[newType + 'Format']
    self.format = self.defaultFormat
    self.initFormat()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def field_type_converter(self, old_type):\n\n if old_type == 'String':\n new_type = 'Text'\n elif old_type == 'Integer':\n new_type = 'Short'\n elif old_type == 'Date':\n new_type = 'Date'\n elif old_type == 'GlobalID':\n new_type = 'GUID'\n else:\n new_type = 'Double'\n return new_type", "def reformat(self, newformat):\n # check whether the column is defined\n if self._defined:\n # get the appropriate null-format\n nullformat = self._get_nullformat(newformat)\n # set the new formats\n self._format = [newformat, nullformat]\n else:\n # first the column type must be defined\n raise Exception('The data type of this column is not yet defined!')", "def convert_type(self, value, schema_type, **kwargs):", "def setType(self,newtype):\n\t\tself.type = newtype;", "def _convert_field_type(row):\n return row", "def _make_serializable(self, field):\n if isinstance(field, datetime):\n return str(field)\n elif isinstance(field, Decimal):\n return float(field)\n else:\n return field", "def field_type(self):\n return \"\"", "def set_type(self, new_value):\n\n self.vax_type = new_value\n self.save()", "def restore_type(field_type, value):\n field_types = {\n 'BooleanField': string_to_bool,\n 'CharField': str,\n 'FloatField': float,\n 'IntegerField': int,\n }\n return_val = lambda x: x\n recast = field_types.get(field_type, return_val)\n return recast(value)", "def change_object_type(obj, new_type):\n # type: (Union[str, SupportsInt, SupportsFloat], str) -> Union[str, int, float]\n if new_type == 'str':\n return str(obj)\n elif new_type == 'int':\n return int(obj)\n elif new_type == 'float':\n return float(obj)\n else:\n raise IOError('expected_type \"{}\" is not supported in this function.'.format(new_type))", "def convert_format(self, new_format):\n if new_format not in [0, 1, 2, 3]:\n raise ValueError(\"Unknown format specified\")\n\n inp_format = new_format\n if inp_format == 3:\n new_format = 2\n\n for block in self.frd.blocks:\n if hasattr(block, 'format'):\n block.format = new_format\n\n self.frd.node_block.format = inp_format", "def _assign_type(self, type):\n if self.is_input:\n return 'data'\n else:\n return type", "def format_field(model, name, value):\n if value is None: return value\n t = type( getattr(model,name) )\n if t == datetime:\n return value.replace('T',' ')\n return value", "def update_column_format(self):\n pass", "def _change_column_type(self, t_trans, value):\n # create an element object\n val = ForElement(value)\n\n # if not set the type and the define flagg\n self._format = val.get_fvalue()\n\n # set the type to the one in the transformator object\n self._type = t_trans.higher_type\n\n # go over all data\n for index in range(len(self._data)):\n if self._data[index] != None:\n # transform all non-Null entries\n self._data[index] = t_trans.to_higher_type(self._data[index])", "def __post_init__(self):\n for field in dataclasses.fields(self):\n value = getattr(self, field.name)\n if not isinstance(value, field.type) and value:\n try:\n setattr(self, field.name, field.type(value))\n except ValueError:\n raise ValueError(f\"Expected {field.name} \"\n f\"to be {field.type}, \"\n f\"got {repr(value)}\")", "def to_field(obj):\r\n\r\n\r\n if isinstance(obj, Field):\r\n field = obj\r\n else:\r\n d = { \"storage_type\": \"unknown\" }\r\n\r\n if isinstance(obj, basestring):\r\n d[\"name\"] = obj\r\n elif type(obj) == tuple or type(obj) == list:\r\n d[\"name\"] = obj[0]\r\n try:\r\n d[\"storage_type\"] = obj[1]\r\n try:\r\n d[\"analytical_type\"] = obj[2]\r\n except:\r\n pass\r\n 
except:\r\n pass\r\n else: # assume dictionary\r\n d[\"name\"] = obj[\"name\"]\r\n d[\"label\"] = obj.get(\"label\")\r\n d[\"storage_type\"] = obj.get(\"storage_type\")\r\n d[\"analytical_type\"] = obj.get(\"analytical_type\")\r\n d[\"adapter_storage_type\"] = obj.get(\"adapter_storage_type\")\r\n\r\n if \"analytical_type\" not in d:\r\n storage_type = d.get(\"storage_type\")\r\n if storage_type:\r\n deftype = default_analytical_types.get(storage_type)\r\n d[\"analytical_type\"] = deftype or \"typeless\"\r\n else:\r\n d[\"analytical_type\"] = \"typeless\"\r\n\r\n field = Field(**d)\r\n return field", "def change_type(self, col_name, str_type):\n if self[col_name] is not None:\n self[col_name] = self[col_name].astype(str_type)", "def __update_custom_fieldtype_settings(self,\n eachfield, #field etree\n ):\n\n # xml attributes\n TYPE = \"type\"\n READABLE = \"readable\"\n WRITABLE = \"writable\"\n LABEL = \"label\"\n HINT = \"comment\"\n DEFAULT = \"default\"\n LINES = \"lines\"\n BOXES = \"boxes\"\n HASOPTIONS = \"has_options\"\n\n fieldtype = eachfield.attrib.get(TYPE)\n field_property = self.custom_fieldtype_properties.get(fieldtype, {})\n\n cust_fieldtype = fieldtype_property.get(\"fieldtype\", None)\n cust_readable = fieldtype_property.get(\"readable\", None)\n cust_writable = fieldtype_property.get(\"writable\", None)\n cust_label = fieldtype_property.get(\"label\", None)\n cust_hint = fieldtype_property.get(\"hint\", None)\n cust_default = fieldtype_property.get(\"default\", None)\n cust_lines = fieldtype_property.get(\"lines\", None)\n cust_boxes = fieldtype_property.get(\"boxes\", None)\n cust_has_options = fieldtype_property.get(\"has_options\", None)\n cust_options = fieldtype_property.get(\"options\", None)\n \n if cust_fieldtype:\n if cust_fieldtype != None:\n eachfield.set(TYPE, cust_fieldtype)\n if cust_readable != None:\n eachfield.set(READABLE, cust_readable)\n if cust_writable != None:\n eachfield.set(WRITABLE, cust_writable)\n if cust_label != None:\n eachfield.set(LABEL, cust_label)\n if cust_hint != None:\n eachfield.set(HINT, cust_hint)\n if cust_default != None:\n eachfield.set(DEFAULT, cust_default)\n if cust_lines != None:\n eachfield.set(LINES, cust_lines)\n if cust_boxes != None:\n eachfield.set(BOXES, cust_boxes)\n if cust_has_options != None:\n eachfield.set(HASOPTIONS, cust_has_options)\n if cust_options != None:\n opt_available = eachfield.getchildren()\n if len(opt_available) == 0:\n eachfield.append(cust_options)\n elif len(opt_available) == 1:\n eachfield.remove(opt_available[0])\n eachfield.append(cust_options)", "def _update_input_type(self):\n pass", "def set_format_by_type(self, value, format):\n self.set_render_func_by_type(value, format.format)", "def renameFields(self, nameDict):\n for format in self.values():\n if format.genericType in nameDict:\n nameDict[format.name] = nameDict[format.genericType]\n for item in globalref.docRef.root.descendantGen():\n for oldName, newName in nameDict.get(item.formatName, []):\n if oldName in item.data:\n item.data[newName] = item.data[oldName]\n del item.data[oldName]", "def make_field(field):\n\n if \"time\" in field:\n return TimeField(field)\n if \"zd\" in field:\n return RadianField(field)\n else:\n return SimpleField(field)", "def __createField(self, field):\n name = field['name']\n fType = field['type']\n fieldLength = None\n if 'shape' in name.lower():\n return\n elif \"String\" in fType:\n fieldType = \"TEXT\"\n fieldLength = field['length']\n elif \"Date\" in fType:\n fieldType = \"DATE\"\n elif 
\"SmallInteger\" in fType:\n fieldType = \"SHORT\"\n elif \"Integer\" in fType:\n fieldType = \"LONG\"\n elif \"Double\" in fType:\n fieldType = \"DOUBLE\"\n elif \"Single\" in fType:\n fieldType = \"FLOAT\"\n else:\n fieldType = \"Unknown\"\n featureClass = self.featureClassLocation + \"\\\\\" + self.name\n validatedName = arcpy.ValidateFieldName(name, self.featureClassLocation)\n arcpy.AddField_management(in_table=featureClass, field_name=name, field_type=fieldType, field_length=fieldLength)", "def _to_base_type(self, value):\n if value is None:\n return ''\n else:\n return value.to_json()", "def output_field(self):\n Field = self.original_field.__class__\n if isinstance(self.original_field, fields.CharField):\n return Field(max_length=self.original_field.max_length)\n\n return Field()", "def api_field_from_django_field(cls, f, default=CharField):\n if isinstance(f, JSONField):\n return JSONApiField\n \n return super(PandaModelResource, cls).api_field_from_django_field(f, default)", "def replace_typeval(self, combined, replacement):\n raise NotImplementedError(\"This is an abstract method.\")", "def convert_type(cls, prop_obj, column_name, specific_type, empty_value):\n for key, item in enumerate(prop_obj):\n if item[column_name]:\n prop_obj[key][column_name] = specific_type(item[column_name])\n else:\n prop_obj[key][column_name] = empty_value", "def _new_field(self):\n field = self.domain.new_field()\n return field" ]
[ "0.7694419", "0.6731671", "0.6345279", "0.6247612", "0.6140391", "0.6116279", "0.60566497", "0.60437393", "0.6029452", "0.6012749", "0.5938881", "0.589731", "0.58825034", "0.58494186", "0.582059", "0.58018064", "0.57945174", "0.57587177", "0.57267576", "0.5711055", "0.57083714", "0.5697486", "0.56484854", "0.56402653", "0.5629887", "0.5577035", "0.55711544", "0.55596805", "0.5544898", "0.55447626" ]
0.7249991
1
Returns English name if assigned, o/w name
def englishName(self):
    if self.enName:
        return self.enName
    return self.name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def english_name(self) -> str | None:\n return self.get_display_name(Locale('en'))", "def get_localized_name(name):\n locale = \"{}_{}\".format(\n name[\"preferredLocale\"][\"language\"],\n name[\"preferredLocale\"][\"country\"]\n )\n return name['localized'].get(locale, '')", "def primary_name(names):\n\tlangs = names.keys()\n\tif 'en' in langs:\n\t\treturn names['en']\n\treturn names[langs[0]]", "def get_name() -> str:", "def get_eng_name(self):\n return self.eng_name", "def get_name():", "def get_fulll_name(self):\n return self.name", "def get_name(self) -> str:\n def _seg2():\n if self.name:\n return self.name\n else:\n try:\n return self.player.title\n except AttributeError:\n return 'No title specified'\n try:\n if self.player.title == 'translate_tts':\n return 'Speech'\n else:\n return _seg2()\n except AttributeError:\n return _seg2()", "def get_display_name(self, locale: Locale | str | None = None) -> str | None:\n if locale is None:\n locale = self\n locale = Locale.parse(locale)\n retval = locale.languages.get(self.language)\n if retval and (self.territory or self.script or self.variant):\n details = []\n if self.script:\n details.append(locale.scripts.get(self.script))\n if self.territory:\n details.append(locale.territories.get(self.territory))\n if self.variant:\n details.append(locale.variants.get(self.variant))\n if self.modifier:\n details.append(self.modifier)\n detail_string = ', '.join(atom for atom in details if atom)\n if detail_string:\n retval += f\" ({detail_string})\"\n return retval", "def get_name_translation(self):\n\t\treturn frappe.get_value(\n\t\t\t\"Translation\",\n\t\t\t{\"source_text\": self.doc_type, \"language\": frappe.local.lang or \"en\"},\n\t\t\t[\"name\", \"translated_text\"],\n\t\t\tas_dict=True,\n\t\t)", "def get_level_name(self, level_id):\n for (english_name, level_package) in self.levels[self.game]:\n if level_package.lower() == level_id.lower():\n return english_name\n return None", "def get_name() -> str:\n pass", "def get_name(self):\n return self.normalize_name(self.name)", "def get_full_language(self, language):\n if language:\n language = pycountry.languages.get(alpha_2=language)\n if language:\n language = language.name\n return language.title()", "def get_name(self):\n if self.name != None: return self.name\n else: return self.get_name_from_items(self.items.values())", "def get_descriptive_name(self):\r\n long_name = str(self.year) + ' ' + self.make + ' ' + self.model\r\n #Mostrar_Grande = long_name.upper()\r\n #return long_name.upper()\r\n #return Mostrar_Grande #Funciona Com Return TAMBÉM, mas olhe na linha 39 como seria necessário usar.\r\n print(long_name.upper())", "def get_full_name_with_academic_title(self) -> str:\n base_name = super().get_full_name()\n return f'{self.title} {base_name}' if self.title else base_name", "def get_full_name(self):\n return self.name #self is base and it hits name filed", "def get_full_name(self):\n\n return self.name", "def get_language_name(self):\n return self.language_name", "def full_name(self) -> Optional[str]:\n return pulumi.get(self, \"full_name\")", "def get_name(self):\n return self.load_name(self.subject)", "def LegacyName(self, default=None):\n return self.data.get('legacy_name', default)", "def display_name(self) -> Optional[Mapping[str, str]]:\n return pulumi.get(self, \"display_name\")", "def get_name():\n\n return character['Name']", "def get_full_name(self):\n return self.name", "def get_full_name(self):\n return self.name", "def get_full_name(self):\n return self.name", "def 
get_full_name(self):\n return self.name", "def get_full_name(self):\n return self.name" ]
[ "0.7934382", "0.73227644", "0.7047755", "0.6930511", "0.68412167", "0.6786411", "0.67855275", "0.6760979", "0.6730141", "0.6662109", "0.6648933", "0.66485107", "0.66408026", "0.6632018", "0.65623486", "0.655018", "0.65476584", "0.6501709", "0.64950544", "0.64857614", "0.647543", "0.6458725", "0.64511305", "0.6439916", "0.64379627", "0.64333516", "0.64333516", "0.64333516", "0.64333516", "0.64333516" ]
0.80885744
0
Return name used for labels - add * for required fields
def labelName(self):
    if self.isRequired:
        return '%s*' % self.name
    return self.name
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def label_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"label_name\")", "def build_label_text(field_name: str, field: dict):\n\n label = \"\"\n if \"required\" in field:\n label = \" * \" if field.get(\"required\") else \"\"\n\n # If we don't have a label defined, used the field name\n if \"label\" not in field:\n field.update({\"label\": field_name.upper()})\n\n label += field[\"label\"]\n\n return label", "def label_name(self) -> str:\n return pulumi.get(self, \"label_name\")", "def name_field(self):\r\n return 'name'", "def label(self):\r\n return self._name", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def getName():", "def Label(self) -> str:", "def label(self):\n return ''", "def get_name(self):\n return self._label", "def get_labelname(self):\n return self.options['labelname']", "def get_label(name):\n lower = name.lower()\n vals = lower.split('_')\n if 'ho' in vals:\n name = 'Independent Estimate'\n elif 'alldata' in vals:\n name = 'Extra-Data Estimate'\n elif 'ris' in vals[0]:\n name = 'RIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n elif 'is' in vals[0]:\n name = 'OIS'\n if 'w' in vals[0]:\n name += ' WIS'\n if 'pd' in vals[0]:\n name += ' PDIS'\n if 'dr' in vals:\n name += ' DR'\n if 'wdr' in vals:\n name += ' WDR'\n return name", "def create_label(self, org, name):\n pass", "def _get_label ( self ):\n if self._label is not None:\n return self._label\n return self.name", "def get_label():\n inp = option_text('Input label name (leave blank for no label):')\n add_to_collected('label', inp)\n OPTIONS['label'] = inp\n return", "def label(self) -> str:\n return self[\"label\"]", "def _get_label(self):\n if self.model.name == '':\n return \"KPI\"\n return \"KPI: {} ({})\".format(self.model.name, self.model.objective)", "def label(self) -> str:\r\n\r\n return self.__label", "def getName(self):\n return \"\"", "def name(self):\n\t\t# This is necessary for ColumnLists that are used\n\t\t# for CondDescs as well. Ideally, we'd do this on an\n\t\t# InputKeys basis and yield their names (because that's what\n\t\t# formal counts on), but it's probably not worth the effort.\n\t\treturn \"+\".join([f.name for f in self.inputKeys])", "def field_names(self):\n\n entry_time_name = forms_builder.forms.models.FormEntry._meta.get_field('entry_time').verbose_name.title()\n document_title_name = Document._meta.get_field('name').verbose_name.title()\n document_url_name = Document._meta.get_field('url').verbose_name.title()\n\n form = self.form.all()[0]\n return ['user'] \\\n + [document_title_name, document_url_name] \\\n + [f.label\n for f in form.fields.all()] \\\n + [entry_time_name]", "def label(self) -> str:\r\n self._logger.debug(log_message_formatter(\r\n \"get\", f\"{self}\", \"label\"))\r\n return self._name", "def get_name(self):", "def get_name(self):", "def get_name():", "def field_label(field_name, bushfire=None):\r\n field_name = FIELD_MAPPING.get(field_name) or field_name\r\n if bushfire:\r\n try:\r\n return bushfire._meta.get_field(field_name).verbose_name\r\n except:\r\n return field_name\r\n else:\r\n return field_name" ]
[ "0.7235186", "0.7140648", "0.7047591", "0.7011272", "0.69102323", "0.678387", "0.678387", "0.678387", "0.678387", "0.678387", "0.678387", "0.67811215", "0.67751276", "0.67363954", "0.67283016", "0.6711237", "0.6679455", "0.6657541", "0.66369164", "0.65833545", "0.65801924", "0.65796936", "0.6577343", "0.65438753", "0.6540091", "0.65350425", "0.65324503", "0.65324503", "0.6524761", "0.65048337" ]
0.82659084
0
Return formatted text, properly escaped if not in titleMode
def formatOutput(self, storedText, titleMode, internal=False):
    prefix = self.prefix
    suffix = self.suffix
    if titleMode:
        if self.html:
            storedText = self.removeMarkup(storedText)
        if globalref.docRef.formHtml:
            prefix = self.removeMarkup(prefix)
            suffix = self.removeMarkup(suffix)
    else:
        if not self.html:
            storedText = escape(storedText).replace('\n', '<br />')
        if not globalref.docRef.formHtml:
            prefix = escape(prefix)
            suffix = escape(suffix)
    return u'%s%s%s' % (prefix, storedText, suffix)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def title(text, level=0):\n return '\\n' + text + '\\n' + '=-~_#%^' [level] * len(text) + '\\n\\n'", "def format_title(self, data):\n return data", "def output_plain_sep_title(title):\n print(f\"{plain_sep_mark}\\t{title}{plain_sep_mark}\")", "def formatOutput(self, storedText, titleMode, internal=False):\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def get_text(downgrade_titles=False):", "def PROPER(text):\n return text.title()", "def title(self, string):\n return self.bold(string)", "def html_title(title):\n return '<center><h1>%s</h1></center>' % (title)", "def _get_title_text(self):\n return Text(\n self,\n self.settings.font_bold_filename,\n 96,\n self.settings.font_color,\n 'zuckbot',\n {'center': self.screen_rect.center},\n 0,\n -50,\n )", "def get_title_repr(self) -> str:\n try:\n return Title[self.title].value\n except (KeyError, ValueError):\n pass", "def editModeHeading(text):\n return u'<p style=\"editModeHeading\">%s</p>' % text", "def generate_title(self, title=None):\n if title is None:\n title = self.header.get('title', self.title)\n\n title = self.generate(title)\n title = title.replace('<p>', '').replace('</p>', '')\n # no trailing newlines\n title = re.sub(r'\\n+', ' ', title).rstrip()\n return title", "def formatText(s, bold=False, underlined=False, negative=False):\n\n if not FORMATTING_AVAILABLE:\n return s\n\n head = \"\"\n if bold: head += \"\\033[1m\"\n if underlined: head += \"\\033[4m\"\n if negative: head += \"\\033[7m\"\n\n return head + s + \"\\033[0m\"", "def emph_text(text):\n\n if use_color():\n return colorama.Style.BRIGHT + text + colorama.Style.RESET_ALL\n else:\n return text", "def formatOutput(self, storedText, titleMode, internal=False):\n if storedText not in self.formatList:\n try:\n storedText = GenBoolean(storedText).boolStr(self.format)\n except GenBooleanError:\n storedText = _errorStr\n return TextFormat.formatOutput(self, storedText, titleMode, internal)", "def format_text(self):\n\n return \"{}{}{}\".format(self.get_text(),\n Message.format_performers(self.get_performers()),\n Message.format_keywords(self.get_keywords())).strip()", "def title_string(self):\n return ' '.join(self.title).replace(' - ', '')", "def formatOutput(self, storedText, titleMode, internal=False):\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = ['<img src=\"%s\">' % escape(url, treedoc.escDict) for url\n in paths]\n return u'<br />'.join(results)", "def _text_formatting(bs4_tag):\n return bs4_tag.get_text().replace('\\n', '')", "def print_with_title(title, content, before='', after='', hl='='):\n cont_maxlen = max(len(s) for s in content.split('\\n'))\n hl_len = max(cont_maxlen, len(title))\n print('{}{}\\n{}\\n{}{}'.format(before, title, hl * hl_len, content, after))", "def title(self, txt):\n num = len(txt)\n ticks = \"=\" * num\n print(ticks)\n print(txt)\n print(ticks)", "def test_title(self):\n self.assertEquals(\"Title\\n=====\", trans(\"== Title ==\"))\n self.assertEquals(\"Title\\n-----\", trans(\"=== Title ===\"))\n self.assertEquals(\"#### Title\", trans(\"==== Title ====\"))\n self.assertEquals(\"##### Title\", trans(\"===== Title =====\"))", "def helptext(self):\n return \"\"", "def book_title(book_text):\n search = re.search(\"Title:(.*)\", book_text)\n title = search.group(1).replace(\"\\r\", \" \").strip()\n return title", "def get_title():", "def __str__(self):\n date_str = self.date.strftime(self.journal.config['timeformat'])\n title = 
date_str + \" \" + self.title\n body = self.body.strip()\n\n return \"{title}{sep}{body}\\n\".format(\n title=title,\n sep=\"\\n\" if self.body else \"\",\n body=body\n )", "def __str__(self) -> str:\n return textwrap.wrap(self.title, _POST_TITLE_MAX_LENGTH // 4)[0]", "def title_draw():\n nonlocal width\n widthTitle = len(self.str_title)\n if widthTitle > width:\n self.str_title = self.str_title[0:width-5] + '...'\n widthTitle = len(self.str_title)\n h_len = widthTitle + self.l_padding + self.r_padding\n top = ''.join(['┌'] + ['─' * h_len] + ['┐']) + '\\n'\n result = top + \\\n '│' + \\\n ' ' * self.l_padding + \\\n self.str_title + \\\n ' ' * self.r_padding + \\\n '│' + self.str_shadow + '\\n'\n offset = 2 + self.l_padding + len(self.str_title) + self.r_padding\n return result, offset", "def show_title():\r\n complement = (\r\n '\\n __ ')\r\n title = ('\\n _______ _______________ ____ _______ __ ___ _ _______/ /_ ____ _____ ____ ____ ')\r\n title += ('\\n / ___/ / / / ___/ ___/ _ \\/ __ \\/ ___/ / / / / _ \\| |/_/ ___/ __ \\/ __ `/ __ \\/ __ `/ _ \\ ')\r\n title += ('\\n/ /__/ /_/ / / / / / __/ / / / /__/ /_/ / / __/> </ /__/ / / / /_/ / / / / /_/ / __/ ')\r\n title += ('\\n\\___/\\__,_/_/ /_/ \\___/_/ /_/\\___/\\__, / \\___/_/|_|\\___/_/ /_/\\__,_/_/ /_/\\__, /\\___/ ')\r\n title += ('\\n /____/ /____/ ')\r\n # Add Styles\r\n break_line = ('-' * len(complement) + \"\\n\") * 2\r\n print(\"{}\\n{}\\n{}\\n\".format(break_line, title, break_line))", "def format_title(self, title):\n new_title = ''.join(word.lower().strip('!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~ ') for word in title)\n return new_title" ]
[ "0.6623557", "0.64947814", "0.6347113", "0.6307539", "0.621596", "0.6210496", "0.60684896", "0.60674477", "0.60663515", "0.60421175", "0.6019259", "0.59935653", "0.59802073", "0.59790826", "0.595393", "0.5948588", "0.5939195", "0.590317", "0.5872387", "0.58521676", "0.5838757", "0.5835408", "0.5834278", "0.5832544", "0.58303535", "0.58232164", "0.58196765", "0.5818879", "0.581837", "0.58134586" ]
0.67517006
0
Return tuple of this field's text in edit format and bool validity, using edit format option
def editText(self, item):
    storedText = item.data.get(self.name, '')
    result = self.formatEditText(storedText)
    if self.isRequired and not result[0]:
        return (result[0], False)
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n return (storedText, not storedText)", "def storedText(self, editText):\n if editText:\n return (editText, True)\n return (editText, not self.isRequired)", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n try:\n return (GenNumber(storedText).numStr(self.format), True)\n except GenNumberError:\n return (storedText, not storedText)", "def storedText(self, editText):\n try:\n return (repr(GenTime(editText)), True)\n except GenTimeError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n return (editText, editText or not self.isRequired)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)", "def _get_field_edit_widget(self, row_index):\n field_row = self.field_rows[row_index]\n if not field_row.editable:\n raise TypeError(\"Cannot edit a boolean or dropdown field. 
(Internal error, tell the developer!)\")\n field_type = field_row.field_type\n field_value = self.get_field_dict(self.get_entry_id(self.active_row_index))[field_row.field_name]\n initial_text = repr(sorted(field_value)) if issubclass(field_type, list) else str(field_value)\n return self.Entry(\n field_row.value_box,\n initial_text=initial_text,\n integers_only=field_type == int,\n numbers_only=field_type == float,\n sticky=\"ew\",\n width=5,\n )", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def getinforow(docfield):\n\t\tif docfield.fieldtype == 'Select':\n\t\t\tif not docfield.options:\n\t\t\t\treturn ''\n\t\t\telif docfield.options.startswith('link:'):\n\t\t\t\treturn 'Valid %s' % docfield.options[5:]\n\t\t\telse:\n\t\t\t\treturn 'One of: %s' % ', '.join(filter(None, docfield.options.split('\\n')))\n\t\telif docfield.fieldtype == 'Link':\n\t\t\treturn 'Valid %s' % docfield.options\n\t\telif docfield.fieldtype in ('Int'):\n\t\t\treturn 'Integer'\n\t\telif docfield.fieldtype == \"Check\":\n\t\t\treturn \"0 or 1\"\n\t\telif docfield.info:\n\t\t\treturn docfield.info\n\t\telse:\n\t\t\treturn ''", "def getText(self):", "def get_text_format(self) -> constants.TextFormatStr:\n return constants.TEXT_FORMAT.inverse[self.textFormat()]", "def complete_opt_format(self, text, *_):\n return [t + \" \" for t in FORMATTERS if t.startswith(text)]", "def test_form_help_text_is_correct(self):\n # https://stackoverflow.com/questions/24344981/how-to-change-help-\n # text-of-a-django-form-field\n\n # Above link helped figure out how to access help_text.\n self.assertEqual(\n self.form.fields[\"texture\"].help_text,\n \"One word descriptions seperated by commas.\",\n )", "def text(value):\n return True", "def get_format_opts(cls, format_=\"value\", fields=[]):\n return \" -f {0} {1}\".format(format_, \" \".join([\"-c \" + it for it in fields]))" ]
[ "0.7739331", "0.76100457", "0.7482231", "0.7482231", "0.7214738", "0.7091386", "0.702513", "0.7018469", "0.6923507", "0.67845714", "0.67359626", "0.6618689", "0.6553471", "0.6410668", "0.63894486", "0.6138071", "0.56394523", "0.5639128", "0.5639128", "0.5639128", "0.5639128", "0.5639128", "0.5638955", "0.5513468", "0.55053264", "0.53779685", "0.5359518", "0.53337234", "0.5307074", "0.5301762" ]
0.76801556
1
Return initial value in edit format, found in edit format option
def getEditInitDefault(self): return self.formatEditText(self.initDefault)[0]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEditInitDefault(self):\n if self.initDefault in DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)", "def getEditInitDefault(self):\n if self.initDefault in TimeFormat.timeStampStrings:\n return TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)", "def format(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"format\")", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def setInitDefault(self, editText):\n self.initDefault = self.storedText(editText)[0]", "def setInitDefault(self, editText):\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def setInitDefault(self, editText):\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def on_edit_changed(self, edit):\n\t\tself.emit('value-changed', edit.get_text())", "def assign_format(self):\n if self.is_output or self.is_req_output:\n if self.pname in self.tool_data[self.tool_name]['output_fmt']:\n return self.tool_data[self.tool_name]['output_fmt'][self.pname]\n elif self.pname in self.gen_out_fmt:\n return self.gen_out_fmt[self.pname]\n elif self.is_input:\n if self.pname in self.tool_data[self.tool_name]['input_fmt']:\n print(self.tool_data[self.tool_name])\n return self.tool_data[self.tool_name]['input_fmt'][self.pname]\n elif self.pname in self.gen_in_fmt:\n return self.gen_in_fmt[self.pname]\n else:\n # Not sure yet what this will be used for, but I think we need it.\n return ''", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def retrieve_input():\r\n inputValue = simpleText.get(\"1.0\",\"end-1c\") #Our Variable\r\n #\"1.0\" = start from first character in the text widget\r\n #\"end-1c = delete the last character that Text creates every time\"\r\n return inputValue", "def get_initial(self):\n\t\treturn self.initial", "def storedText(self, editText):\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)", "def make_input(value):\n what = value[0]\n if what.startswith('not editable'):\n what = what.replace('not editable:', '').replace(\"'\", \"\\'\")\n if what.startswith('attribute:value:'):\n what = what.replace('attribute:value:', '').replace(\"'\", \"\\'\")\n return what", "def get_val_str(self):\n fmt_str = self.template.get_format_str()\n if self.val_obj is None:\n return \"\"\n elif fmt_str:\n return fmt_str % (self.val_obj.val)\n else:\n return str(self.val_obj.val)", "def get_opt_formatted(self, command):\n if \"formatted\" in self.command_dict[\"commands\"][command].keys():\n return self.command_dict[\"commands\"][command][\"formatted\"]\n else:\n return CommandDict.DEFAULT_OPT_FORMATTED", "def formatsrc(self):\n return self[\"formatsrc\"]", "def formatsrc(self):\n return self[\"formatsrc\"]", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return 
(storedText, not storedText)", "def get_initial(self):\n return self.initial", "def getInitDefault(self):\n if self.initDefault in DateFormat.dateStampStrings:\n return GenDate().dateStr()\n return TextFormat.getInitDefault(self)", "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def format(self):\n return self.getparam(\"FORMAT\")", "def format(self):\n return self.getparam(\"FORMAT\")", "def _getAlterToFormat(cls, alter):\n if alter == '':\n alter = ['', '']\n if isinstance(alter, str): # nothing to do if it is dict\n alter = ['', alter]\n return alter", "def value(self):\n return str(self.input.currentText())", "def presentation(self, value):\r\n return value", "def default_formatter(self, data):\n return data", "def get_format(self):\n return self._format[0]", "def initial_value(self):\n return self._initial_value" ]
[ "0.69168735", "0.6777391", "0.6163817", "0.5962609", "0.59190995", "0.5825621", "0.5639453", "0.55958575", "0.5588548", "0.55880916", "0.55728984", "0.5547174", "0.55372924", "0.5518307", "0.55125266", "0.54999983", "0.54888153", "0.54888153", "0.54887563", "0.5471209", "0.5466419", "0.545412", "0.54069316", "0.54069316", "0.53839904", "0.5377033", "0.53640187", "0.53620666", "0.5355793", "0.5349789" ]
0.7327655
0
Return a list of choices for setting the init default
def initDefaultChoices(self): return []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initDefaultChoices(self):\n return [entry[0] for entry in self.getEditChoices()]", "def initDefaultChoices(self):\n return [text for text in self.formatList]", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices", "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self, instance):\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [\n x.value\n for x in instance.get_typed_parameter().get_available_choices()\n ]\n else:\n return None", "def choices(self):\n return self.__class__.get_setting_choices(self.key, **self.get_kwargs())", "def choices() -> List[str]:\n return [t.name.upper() for t in ConfigurationVariable]", "def choices(self) -> list:\n return [self.mapper(i) for i in self.keys()]", "def as_choices():\n return (\n # Live is currently disabled as a choice\n # pending implementation\n (\"live\", \"Use working directory\"),\n (\"latest\", \"Use latest snapshot\"),\n (\"pinned\", \"Pinned to snapshot\"),\n )", "def choices(self):\n # Needs to be implmented by subclass\n raise Exception(NotImplemented)", "def choices(cls):\n _choices = []\n for attr in _user_attributes(cls):\n val = getattr(cls, attr)\n setattr(cls, attr[1:], val[0])\n _choices.append((val[0], val[1]))\n setattr(cls, 'CHOICES', tuple(_choices))\n return cls", "def valid_options(self):\n choices = self.choices()\n\n if not choices:\n return None\n\n return [opt[0] for opt in choices]", "def choices(cls):\n # return list(map(tuple, cls.__members__.items()))\n return [(int(code), name) for name, code in cls.__members__.items()]", "def choices(self):\n\n if self._choices == None:\n self._choices = [ExperimentChoice(self, choice_name) for choice_name in self.choice_names]\n\n return self._choices", "def get_choices(cls):\n return cls.values.items()", "def __init__(self,choices,caption='ListSelection',default=[],single=False,check=False,sort=False,*args,**kargs):\n InputDialog.__init__(self,caption=caption,items = [\n dict(name='input',value=default,itemtype='list',choices=choices,\n text='',single=single,check=check,sort=sort,*args,**kargs),\n ],)", "def choices(self):\n return tuple(self._choices)", "def get_choices_for_model_field(cls):\n return [c[0:2] for c in cls.attr_choices]", "def multiple_choice():\n\n return [\"MAR\", \"MAR\", \"NI\", \"NI\", \"MCAR\"]", "def choices(cls):\n return tuple(item.as_tuple for item in list(cls))", "def as_choices(cls, key_type=None):\n if key_type is None:\n key_type = cls.get_default_choice_type()\n return cls.enum_class.as_choices(key_type)", "def _set_default_suits(self):\n # set up suits\n suit_types = [('Spades', 1), ('Hearts', 2), ('Diamonds', 3), ('Clubs', 4)]\n # populate the list of suits\n suit_list = list()\n for s in suit_types:\n suit_list.append(Suit(s[0], s[1]))\n\n return suit_list", "def choices(self) -> list:\n return [self.mapper(item) for item in self]", "def _resolve_defaults(self, **kwargs):\n res = list()\n for name, value in kwargs.items():\n if value is None:\n value = self.default(name)\n if value is None:\n raise RuntimeError(f\"Missing default {name}\")\n res.append(value)\n return res", "def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = 
self.get_field_info_key('choices')", "def get_setting_choices(cls, key, **kwargs):\n setting = cls.get_setting_definition(key, **kwargs)\n\n choices = setting.get('choices', None)\n\n if callable(choices):\n # Evaluate the function (we expect it will return a list of tuples...)\n return choices()\n\n return choices", "def initDefaults(self):\n return _libsbml.Parameter_initDefaults(self)", "def create_default_repo_choice(self, default_repo):\n return (default_repo, default_repo)" ]
[ "0.83096915", "0.8089902", "0.7565213", "0.7451019", "0.699929", "0.699929", "0.680488", "0.67091656", "0.66209406", "0.65692645", "0.6532258", "0.6486172", "0.64289325", "0.6406578", "0.63146526", "0.62376446", "0.62375015", "0.62119025", "0.61605716", "0.6160515", "0.6089932", "0.6064072", "0.60535115", "0.60409874", "0.6025764", "0.6001356", "0.5992603", "0.5973309", "0.59606636", "0.5928593" ]
0.8791058
0
Return tuple of stored text from edited text and bool validity, using edit format option
def storedText(self, editText):
    if editText in self.formatList:
        return (editText, True)
    return (editText, not editText and not self.isRequired)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def storedText(self, editText):\n try:\n return (repr(GenBoolean(editText)), True)\n except GenBooleanError:\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n return (storedText, True)", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def formatEditText(self, storedText):\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)", "def storedText(self, editText):\n if editText:\n return (editText, True)\n return (editText, not self.isRequired)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenTime(editText)), True)\n except GenTimeError:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n try:\n return (GenNumber(storedText).numStr(self.format), True)\n except GenNumberError:\n return (storedText, not storedText)", "def storedText(self, editText):\n return (editText, editText or not self.isRequired)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def edit(self, text):\n return self._edit_engine(text, break_on_success=False)", "def get_data_from_nonformat_text():\n pass", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def syntax_text():", "def on_edit(self, event, text):\n return None", "def process_output(self, text): # pylint: disable=no-self-use\n changed = False\n return changed, text", "def reformat(ctx):\n pass", "def main(text,result=\"latex\",check_text=True,index_project='',file_name=''): # TODO detect and make a warning for genuine latex marks\n if isinstance(text,str):\n text = text.split('\\n')\n \n if check_text:\n check(text)\n \n print(text)\n \n ### managing 
placeholders\n text = parsers['v'].main(text)\n \n ### saving names\n if index_project:\n indexer.parse(text,index_project,file_name)\n \n \n for i in range(len(text)):\n line = text[i]\n ### managing end of line\n line = line.replace(\" ,,\",\"\\\\\\\\\")\n \n while line.count(opening_mark):\n first_part, mark, late_part = line.partition(',;')\n if not late_part:\n break\n late_part, text = parsers[late_part[0]].main(late_part = late_part,\n text=text,\n result=result,\n line_nb = i)\n line = first_part + late_part\n text[i] = line\n \n return '\\n'.join(text)", "def analyze_text(words, format_bolds, format_italics, de_condition={'bold':1, 'italic':-1}, en_condition={'bold':0, 'italic':-1}, end_of_item_eol=False, eoi_bold_to_unbold=False, eoi_unbold_to_bold=True, eoi_italic_to_unitalic=False, eoi_unitalic_to_italic=False):\n table = []\n de_words = ['']\n en_words = ['']\n if de_condition['bold'] != -1: de_condition['bold'] = not(not(de_condition['bold'])) # convert integer to boolean\n if de_condition['italic'] != -1: de_condition['italic'] = not(not(de_condition['italic']))\n \n last_format_bold = False\n last_format_italic = False\n de_bold_ok = False\n de_italic_ok = False\n en_bold_ok = False\n en_italic_ok = False\n item_no = 0\n for k in range(len(words)):\n if format_bolds[k] == False:\n new_format_bold = False\n elif format_bolds[k] == True:\n new_format_bold = True\n if format_italics[k] == False:\n new_format_italic = False\n elif format_italics[k] == True:\n new_format_italic = True\n\n # Check condition for switching to the new item\n flag_new = True\n if item_no>0 and end_of_item_eol and words[k]!='\\n': flag_new = False\n if item_no>0 and eoi_bold_to_unbold and not(last_format_bold and not new_format_bold): flag_new = False\n if item_no>0 and eoi_unbold_to_bold and not(not last_format_bold and new_format_bold): flag_new = False\n if item_no>0 and eoi_italic_to_unitalic and not(last_format_italic and not new_format_italic): flag_new = False\n if item_no>0 and eoi_unitalic_to_italic and not(not last_format_italic and new_format_italic): flag_new = False\n last_format_bold = new_format_bold\n last_format_italic = new_format_italic\n if flag_new:\n de_words.append('')\n en_words.append('')\n item_no += 1\n \n if words[k]!='\\n': # also format_bolds[k] and format_italics[k] must be either True or False\n if de_condition['bold'] != -1:\n de_bold_ok = not de_condition['bold']^format_bolds[k] # use XOR operator\n else: de_bold_ok = True\n if de_condition['italic'] != -1:\n de_italic_ok = not de_condition['italic']^format_bolds[k]\n else: de_italic_ok = True\n if de_bold_ok and de_italic_ok: de_words[item_no] += words[k]\n \n if en_condition['bold'] != -1:\n en_bold_ok = not en_condition['bold']^format_bolds[k]\n else: en_bold_ok = True\n if en_condition['italic'] != -1:\n en_italic_ok = not en_condition['italic']^format_bolds[k]\n else: en_italic_ok = True\n if en_bold_ok and en_italic_ok: en_words[item_no] += words[k]\n else:\n # Check if this end-of-line should be converted to a space, to connect lines\n if de_words[item_no] != '' and de_bold_ok and de_italic_ok: de_words[item_no] += ' '\n if en_words[item_no] != '' and en_bold_ok and en_italic_ok: en_words[item_no] += ' ' \n \n return de_words, en_words" ]
[ "0.76840067", "0.7569935", "0.7569935", "0.73801994", "0.7312141", "0.7185404", "0.71525943", "0.70902133", "0.69048536", "0.6863909", "0.6808694", "0.67496157", "0.66044503", "0.62728953", "0.6122511", "0.60096884", "0.5692688", "0.5692688", "0.5692688", "0.5692688", "0.5692688", "0.5534609", "0.55292165", "0.55122524", "0.54905003", "0.5460751", "0.539562", "0.5386719", "0.5354485", "0.53479636" ]
0.787282
0
Return list of choices for combo box, each a tuple of edit text and any annotation text
def getEditChoices(self, currentText=''): return [(text, '') for text in self.formatList]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self):\n raise NotImplementedError()", "def choices(cls):\n # return list(map(tuple, cls.__members__.items()))\n return [(int(code), name) for name, code in cls.__members__.items()]", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def choices(self, typename, value_field='description'):\n rows = self.type(typename).values('id', value_field)\n return [(r['id'], r[value_field]) for r in rows]", "def get_choices(self, instance):\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [\n x.value\n for x in instance.get_typed_parameter().get_available_choices()\n ]\n else:\n return None", "def get_choices_for_model_field(cls):\n return [c[0:2] for c in cls.attr_choices]", "def initDefaultChoices(self):\n return [entry[0] for entry in self.getEditChoices()]", "def select_combo_text(cb, text, index=0):\n i = 0\n for n in cb.get_model():\n if n[index] == text:\n break\n i += 1\n cb.set_active(i)", "def application_command_autocomplete_choice_builder(name, value):\n return {\n 'name': name,\n 'value': value,\n }", "def choices(self) -> list:\n return [self.mapper(i) for i in self.keys()]", "def choices(self):\n return tuple(self._choices)", "def iter_choices(self):\n\n for pk, obj in self._get_object_list():\n if hasattr(obj, self.id):\n selected = getattr(obj, self.id)\n else:\n selected = obj in self.data\n\n yield (pk, self.get_label(obj), selected)", "def _get_choices_str(self):\n return ', '.join(\n '\"%s\"' % choice\n for choice in self.choices\n )", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditDateFormat', True)\n today = GenDate().dateStr(format)\n yesterday = (GenDate() - 1).dateStr(format)\n tomorrow = (GenDate() + 1).dateStr(format)\n return [(today, '(%s)' % _('today')),\n (yesterday, '(%s)' % _('yesterday')),\n (tomorrow, '(%s)' % _('tomorrow'))]", "def fill_combobox_attributes(self):\n\n list_char = [\"\"]\n list_num = [\"\"]\n if self.ui.radioButton_file.isChecked():\n for a in self.attributes:\n if a['caseOrFile'] == \"file\" and a['valuetype'] == \"character\":\n list_char.append(a['name'])\n if a['caseOrFile'] == \"file\" and a['valuetype'] == \"numeric\":\n list_num.append(a['name'])\n else:\n for a in self.attributes:\n if a['caseOrFile'] == \"case\" and a['valuetype'] == \"character\":\n list_char.append(a['name'])\n if a['caseOrFile'] == \"case\" and a['valuetype'] == \"numeric\":\n list_num.append(a['name'])\n self.ui.comboBox_num_attributes.blockSignals(True)\n self.ui.comboBox_char_attributes.blockSignals(True)\n self.ui.comboBox_num_attributes.clear()\n self.ui.comboBox_char_attributes.clear()\n self.ui.comboBox_char_attributes.addItems(list_char)\n 
self.ui.comboBox_num_attributes.addItems(list_num)\n self.ui.comboBox_num_attributes.blockSignals(False)\n self.ui.comboBox_char_attributes.blockSignals(False)", "def getComboTerms(tuples):\t\t\t\n\t\t\t#return \"[{0}]\".format('; '.join([\"({0})\".format(','.join([text[indices[0]:indices[1]], str(indices[0])])) for indices in tuples]))\n\t\t\treturn \"{0}\".format('; '.join((\"{0}\".format(text[indices[0]:indices[1]]) for indices in tuples)))", "def getEditChoices(self, currentText=''):\n format = globalref.options.strData('EditTimeFormat', True)\n now = GenTime().timeStr(format)\n choices = [(now, '(%s)' % _('now'))]\n for hr in (6, 9, 12, 15, 18, 21, 0):\n time = GenTime((hr, 0)).timeStr(format)\n choices.append((time, ''))\n return choices", "def get_choices(cls):\n return cls.values.items()", "def _find_options(self, inputfield):\r\n elements = inputfield.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def create_combo(self) -> typing.Iterable[Combo]:\n raise NotImplementedError()", "def choices(cls):\n return tuple(item.as_tuple for item in list(cls))", "def choices(self) -> list:\n return [self.mapper(item) for item in self]", "def choices(cls):\n _choices = []\n for attr in _user_attributes(cls):\n val = getattr(cls, attr)\n setattr(cls, attr[1:], val[0])\n _choices.append((val[0], val[1]))\n setattr(cls, 'CHOICES', tuple(_choices))\n return cls", "def extract_choices(element):\r\n\r\n choices = []\r\n\r\n for choice in element:\r\n if choice.tag != 'choice':\r\n raise Exception(\r\n \"[capa.inputtypes.extract_choices] Expected a <choice>\" +\r\n \"tag; got {0} instead\".format(choice.tag)\r\n )\r\n\r\n components = []\r\n choice_text = ''\r\n if choice.text is not None:\r\n choice_text += choice.text\r\n # Initialize our dict for the next content\r\n adder = {\r\n 'type': 'text',\r\n 'contents': choice_text,\r\n 'tail_text': '',\r\n 'value': ''\r\n }\r\n components.append(adder)\r\n\r\n for elt in choice:\r\n # for elements in the choice e.g. 
<text> <numtolerance_input>\r\n adder = {\r\n 'type': 'text',\r\n 'contents': '',\r\n 'tail_text': '',\r\n 'value': ''\r\n }\r\n tag_type = elt.tag\r\n # If the current `elt` is a <numtolerance_input> set the\r\n # `adder`type to 'numtolerance_input', and 'contents' to\r\n # the `elt`'s name.\r\n # Treat decoy_inputs and numtolerance_inputs the same in order\r\n # to prevent students from reading the Html and figuring out\r\n # which inputs are valid\r\n if tag_type in ('numtolerance_input', 'decoy_input'):\r\n # We set this to textinput, so that we get a textinput html\r\n # element.\r\n adder['type'] = 'textinput'\r\n adder['contents'] = elt.get('name')\r\n else:\r\n adder['contents'] = elt.text\r\n\r\n # Add any tail text(\"is the mean\" in the example)\r\n adder['tail_text'] = elt.tail if elt.tail else ''\r\n components.append(adder)\r\n\r\n # Add the tuple for the current choice to the list of choices\r\n choices.append((choice.get(\"name\"), components))\r\n return choices", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices", "def combo_callback(self, eventobj):\n print(self.combo_input.get()) # print name\n print(self.combo_input.current()) # print index", "def choices(self):\n # Needs to be implmented by subclass\n raise Exception(NotImplemented)", "def initDefaultChoices(self):\n return [text for text in self.formatList]" ]
[ "0.64435107", "0.64435107", "0.6406375", "0.6369448", "0.6282898", "0.61602914", "0.61550856", "0.6096663", "0.6079173", "0.6074226", "0.599768", "0.5979722", "0.5946701", "0.59085536", "0.58665997", "0.5852769", "0.5851758", "0.5840527", "0.58387506", "0.5816007", "0.5803215", "0.5797734", "0.57951343", "0.57936877", "0.57807535", "0.5749749", "0.57457596", "0.5732043", "0.57093215", "0.57050306" ]
0.6981332
0
Split textStr using editSep, double sep's become char
def splitText(self, textStr):
    return [text.strip().replace('\0', self.editSep) for text in
            textStr.replace(self.editSep * 2, '\0').split(self.editSep)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def split(string, sep='\\t'):\n return text_type.split(string, sep)", "def extended(self, new_char, new_char_index, sep=' '):\n if new_char == sep:\n return TextState(self.text + new_char, '', new_char_index), self.last_word\n if sep == '':\n return TextState(self.text + new_char, new_char, new_char_index), self.last_word\n return TextState(self.text + new_char, self.last_word + new_char, new_char_index), None", "def mysplit(s,delims):\r\n for c in delims:\r\n s = s.replace(c,' ')\r\n return s.split()", "def tsplit(s, sep):\n stack = [s]\n for char in sep:\n pieces = []\n for substr in stack:\n pieces.extend(substr.split(char))\n stack = pieces\n return stack", "def parse_text(text, delimiter, position):\n new_text = text.split(delimiter)[position]\n\n return new_text", "def multi_split(text, seps):\n if not seps: # split by whitespaces\n return text.split()\n else: # split by separators in `seps`\n\n ##### Topics on Stack Overflow\n # http://stackoverflow.com/questions/1059559/python-strings-split-with-multiple-separators\n\n ## Method 1: use `re.split()` (from gimel)\n return re.split(r'[%s]' % seps, text)\n\n ## Method 2: DIY (from pprzemek)\n '''\n res = [text]\n for sep in seps:\n text, res = res, []\n for s in text:\n res += s.split(sep)\n return res\n '''", "def two_split_delimiters(text: str, delimiters: list) -> list:\n split_text = []\n prev_split = -1\n\n for text_index in range(len(text)):\n for delimiter in delimiters:\n if(text[text_index] == delimiter):\n split_text.append(text[prev_split+1:text_index])\n prev_split = text_index\n\n split_text.append(text[prev_split+1:text_index+1])\n\n return split_text", "def separate(delim):\n # Return a function that takes an argument s, which when called will split\n # s over the delimiter specified (i.e. 
the delim parameter).\n return lambda s: s.split(delim)", "def __split_for_delimiter__(self, string):\n if not self.__delimiter__ == '':\n return string.split(self.__delimiter__)\n return string.split()", "def split(inp_str, sep_char, maxsplit=-1, escape_char='\\\\'):\n\n word_chars = []\n word_chars_append = word_chars.append\n\n inp_str_iter = iter(inp_str)\n\n for c in inp_str_iter:\n word_chars_append(c)\n if c == escape_char:\n try:\n next_char = next(inp_str_iter)\n except StopIteration:\n continue\n if next_char == sep_char:\n word_chars[-1] = next_char\n else:\n word_chars.append(next_char)\n elif c == sep_char:\n word_chars.pop()\n yield ''.join(word_chars)\n maxsplit -= 1\n if maxsplit == 0:\n yield ''.join(inp_str_iter)\n return\n del word_chars[:]\n\n yield ''.join(word_chars)", "def test_splitDelimiters(self):\n r = irc.split(\"xx yyz\", 2)\n self.assertEqual([\"xx\", \"yy\", \"z\"], r)\n r = irc.split(\"xx\\nyyz\", 2)\n self.assertEqual([\"xx\", \"yy\", \"z\"], r)", "def test_two_chars_and_separator():\n assert my_splitter(\",J\", \",\") == [\"\", \"J\"]", "def split(text, delim=','):\n return [x.strip() for x in text.split(delim)]", "def sep(self):\n return self.sep_index", "def splitAtSeparators(expressions):\n splitExpressions = []\n wordStart = 0\n for index, expression in enumerate(expressions):\n if expression.variant == TestExpression.Variant.Separator:\n splitExpressions.append(expressions[wordStart:index])\n wordStart = index + 1\n splitExpressions.append(expressions[wordStart:])\n return splitExpressions", "def split(self, sep=None, maxsplit=None):\n return split(self, sep, maxsplit)", "def split_strings_to_two_char(*, text: str) -> list:\n lenght_text_is_not_even = len(text) % 2 == 1\n\n if lenght_text_is_not_even:\n text += '_'\n\n result = []\n for iterable in range(len(text)//2):\n result.append(text[iterable * 2: iterable * 2 + 2])\n return result", "def test_words_with_sep():\n assert my_splitter(\"bla,bla\", \",\") == [\"bla\", \"bla\"]", "def splitLine(text):\r\n sp = text.split(\" \")\r\n try:\r\n a = sp[0]\r\n b = \" \".join(sp[1:])\r\n except:\r\n a = text\r\n b = \"\"\r\n return a, b", "def separate_pipe(s):\n return s.split('|')", "def extract_text(inp, sep=('(', ')')):\n if sep[0] in inp:\n lsep = inp.find(sep[0])\n rsep = inp.find(sep[1])\n content = inp[lsep+1:rsep]\n ret = \"\".join((inp[:lsep], inp[rsep+1:])).strip()\n return content, ret\n return '', inp", "def split_artist_title(text):\n for separator in SEPARATORS:\n try:\n idx = text.index(separator)\n except ValueError:\n continue\n if idx > -1 and not in_quotes(text, idx):\n return [text[:idx], text[idx + len(separator) :]]", "def test_without_separator():\n assert my_splitter(\"string with !@#$double spaces\") == \\\n [\"string\", \"with\", \"!@#$double\", \"spaces\"]", "def space_injector(space_sep_str: str, desired_str_len: int, sep=' ') -> str:\n sep_str = space_sep_str.split(sep)\n sum_len = 0\n for sstr in sep_str:\n sum_len += len(sstr)\n\n diff = desired_str_len - sum_len\n diff_adj = int(float(diff) / float((len(sep_str) - 1)))\n new_sep = ' ' * diff_adj\n new_str = new_sep.join(sep_str)\n\n return new_str", "def tokenize(text):\n return text.split(' ')", "def tokenize_pt(text):\n #primeiros padrões, separação de palavra de [. , ? ! 
( ) [ ] : ; ' ' \" \" ]\n return split_level_two(split_level_one(text))", "def test_separators_only():\n assert my_splitter(\",ad,\", \"ad\") == [\",\", \",\"]", "def splitInPhrase(self,text):\n return self._support.splitInPhrase(text)", "def text_indentation(text):\n if not isinstance(text, str):\n raise TypeError(\"text must be a string\")\n\n new = text.split(\".\")\n new = [x.strip(\" \") for x in new]\n new = '.\\n\\n'.join(new)\n\n new = new.split(\"?\")\n new = [x.strip(\" \") for x in new]\n new = '?\\n\\n'.join(new)\n\n new = new.split(\":\")\n new = [x.strip(\" \") for x in new]\n new = ':\\n\\n'.join(new)\n\n print(new, end=\"\")", "def split_txt(data: str) -> str:\n items = split_txt_multiline(data)\n ret = ' '.join(items)\n\n return ret" ]
[ "0.61333054", "0.5867214", "0.5828028", "0.58185434", "0.5792178", "0.5787775", "0.573595", "0.5613339", "0.5568093", "0.5554935", "0.55341613", "0.5515633", "0.5514492", "0.55078864", "0.5440001", "0.5372514", "0.5367604", "0.53267324", "0.5326295", "0.52897936", "0.52483726", "0.5225409", "0.5223403", "0.522054", "0.52090126", "0.5185933", "0.5185923", "0.5171821", "0.5169567", "0.51586425" ]
0.7283009
0
Return tuple of choices from inText sorted like format and True if all splits are valid and included
def sortedChoices(self, inText): choices = self.splitText(inText) sortedChoices = [text for text in self.formatList if text in choices] if len(choices) == len(sortedChoices): return (sortedChoices, True) else: return (sortedChoices, False)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def formatEditText(self, storedText):\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)", "def complete_opt_allow_select_scan(self, text, *_):\n return [t for t in (\"true\", \"false\", \"yes\", \"no\") if t.startswith(text.lower())]", "def complete_set(self, text, line, begidx, endidx):\n tokens = split(line[:begidx])\n if len(tokens) == 1:\n return [i for i in ('filter ', 'default ', 'time-format ') if i.startswith(text)]\n if len(tokens) == 2 and tokens[1] == 'time-format':\n return [i for i in ('long', 'short') if i.startswith(text)]\n return []", "def _determine_guess(\n sentences: List[List[Literal]]) -> Tuple[bool, Tuple[str, bool]]:\n literals = [x[0] for x in sentences if len(x) == 1]\n if len(literals) != 0:\n literals.sort(key=lambda x: x.atom)\n selected = literals[0]\n if selected.negation:\n return [True, [selected.atom, False]]\n return [True, [selected.atom, True]]\n atoms = [atom for atom in chain.from_iterable(sentences)]\n atoms.sort(key=lambda x: x.atom)\n selected = atoms[0]\n return [False, [selected.atom, True]]", "def getEditChoices(self, currentText=''):\n currentChoices, valid = self.sortedChoices(currentText)\n nonChoices = [text for text in self.formatList\n if text not in currentChoices]\n results = []\n for choice in nonChoices: # menu entries to add a choice\n allChoices = currentChoices + [choice]\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('add'), choice)))\n if currentChoices:\n results.append((None, None)) # separator\n for choice in currentChoices: # menu entries to remove a choice\n allChoices = currentChoices[:]\n allChoices.remove(choice)\n allChoices = [text for text in self.formatList\n if text in allChoices]\n results.append((self.editSep.join(allChoices),\n '(%s %s)' % (_('remove'), choice)))\n return results", "def parse(question):\n # Handle things like \"should ___ X or Y\"\n if question.lower().startswith('should'):\n question = ' '.join(question.split()[2:])\n\n question = question.strip('?')\n # split on both ',' and ' or '\n choices = question.split(',')\n choices = sum((c.split(' or ') for c in choices), [])\n # Get rid of empty strings\n choices = filter(bool, (c.strip() for c in choices))\n return choices", "def analyze_text(words, format_bolds, format_italics, de_condition={'bold':1, 'italic':-1}, en_condition={'bold':0, 'italic':-1}, end_of_item_eol=False, eoi_bold_to_unbold=False, eoi_unbold_to_bold=True, eoi_italic_to_unitalic=False, eoi_unitalic_to_italic=False):\n table = []\n de_words = ['']\n en_words = ['']\n if de_condition['bold'] != -1: de_condition['bold'] = not(not(de_condition['bold'])) # convert integer to boolean\n if de_condition['italic'] != -1: de_condition['italic'] = not(not(de_condition['italic']))\n \n last_format_bold = False\n last_format_italic = False\n de_bold_ok = False\n de_italic_ok = False\n en_bold_ok = False\n en_italic_ok = False\n item_no = 0\n for k in range(len(words)):\n if format_bolds[k] == False:\n new_format_bold = False\n elif format_bolds[k] == True:\n new_format_bold = True\n if format_italics[k] == False:\n new_format_italic = False\n elif format_italics[k] == True:\n new_format_italic = True\n\n # Check condition for switching to the new item\n flag_new = True\n if item_no>0 and end_of_item_eol and words[k]!='\\n': flag_new = False\n if item_no>0 and eoi_bold_to_unbold and not(last_format_bold and 
not new_format_bold): flag_new = False\n if item_no>0 and eoi_unbold_to_bold and not(not last_format_bold and new_format_bold): flag_new = False\n if item_no>0 and eoi_italic_to_unitalic and not(last_format_italic and not new_format_italic): flag_new = False\n if item_no>0 and eoi_unitalic_to_italic and not(not last_format_italic and new_format_italic): flag_new = False\n last_format_bold = new_format_bold\n last_format_italic = new_format_italic\n if flag_new:\n de_words.append('')\n en_words.append('')\n item_no += 1\n \n if words[k]!='\\n': # also format_bolds[k] and format_italics[k] must be either True or False\n if de_condition['bold'] != -1:\n de_bold_ok = not de_condition['bold']^format_bolds[k] # use XOR operator\n else: de_bold_ok = True\n if de_condition['italic'] != -1:\n de_italic_ok = not de_condition['italic']^format_bolds[k]\n else: de_italic_ok = True\n if de_bold_ok and de_italic_ok: de_words[item_no] += words[k]\n \n if en_condition['bold'] != -1:\n en_bold_ok = not en_condition['bold']^format_bolds[k]\n else: en_bold_ok = True\n if en_condition['italic'] != -1:\n en_italic_ok = not en_condition['italic']^format_bolds[k]\n else: en_italic_ok = True\n if en_bold_ok and en_italic_ok: en_words[item_no] += words[k]\n else:\n # Check if this end-of-line should be converted to a space, to connect lines\n if de_words[item_no] != '' and de_bold_ok and de_italic_ok: de_words[item_no] += ' '\n if en_words[item_no] != '' and en_bold_ok and en_italic_ok: en_words[item_no] += ' ' \n \n return de_words, en_words", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def IsValid(self):\n return not TickerFull.DelimiterSplit in self.Text", "def _parse_choices(self, text):\n choices = dict()\n\n matches = re.findall(self.choice_regex, text)\n for match in matches:\n # remove the brackets\n match = match.replace('[[', '')\n match = match.replace(']]', '')\n\n if '|' in match:\n # format is {text}|{node_id}, the text and node id are different\n text, node_id = match.split('|')\n choices[node_id] = text\n else:\n choices[match] = match\n\n return choices", "def check_series(text_list, set_list):\n in_list = []\n for word in text_list:\n all_words = re.sub('\\(.*?\\)', ',', word).split(',')\n all_words = list(filter(None, all_words))\n component_in_list = [component.strip(' ') in set_list for component in all_words]\n this_word_in_list = all(component_in_list)\n in_list.append(this_word_in_list)\n return in_list", "def check_order(self, filename: str, section: str, texts: List[str]):\n alphas = sorted(texts, key=lambda x: x.split(':')[0].lower())\n if texts == alphas:\n return\n for text, alpha in zip(texts, alphas):\n if text != alpha:\n print(f'{filename}: {section}: {text} vs {alpha}')\n break", "def check(self, text):\n lt = s = n = 0\n result = False\n for g in text:\n if g in LETTERS and lt < self.letters:\n lt += 1\n if g in NUMBERS and n < self.numbers:\n n += 1\n if g in SYMBOLS and s < self.symbols:\n s += 1\n if n == self.numbers and s == self.symbols and lt == self.letters:\n result = True\n break\n return result", "def test_tokenize_en(self):\n input = \"\"\"This is a paragraph. It's not very special, but it's designed\n2 show how the splitter works with many-different combos\nof words. 
Also need to \"test\" the handling of 'quoted' words.\"\"\"\n output = [\n (\"This\", 0), (\"is\", 5), (\"a\", 8), (\"paragraph\", 10), (\"It's\", 22),\n (\"not\", 27), (\"very\", 31), (\"special\", 36), (\"but\", 45), (\"it's\", 49),\n (\"designed\", 54), (\"show\", 65), (\"how\", 70), (\"the\", 74),\n (\"splitter\", 78), (\"works\", 87), (\"with\", 93), (\"many\", 98),\n (\"different\", 103), (\"combos\", 113), (\"of\", 120), (\"words\", 123),\n (\"Also\", 130), (\"need\", 135),\n (\"to\", 140), (\"test\", 144), (\"the\", 150), (\"handling\", 154),\n (\"of\", 163), (\"quoted\", 167), (\"words\", 175)\n ]\n for (itmO, itmV) in zip(output, tokenize_en(input)):\n self.assertEqual(itmO, itmV)", "def choose(inp):\n if not inp.text:\n return lex.input.missing\n options = [i.strip() for i in inp.text.split(',') if i.strip()]\n if not options:\n return lex.input.incorrect\n return random.choice(options)", "def _validate_selects(text, response):\n answer_options = re.split(settings.MULTISELECT_DELIMITER_RE, str(text))\n choices = map(lambda choice: choice.lower(), response.event.choices)\n logger.debug('Question (%s) answer choices are: %s, given answers: %s' % (datatype, choices, answer_options))\n new_answers = copy(answer_options)\n for idx, opt in enumerate(answer_options):\n logger.debug('Trying to format (m)select answer: \"%s\"' % opt)\n try: \n #in the case that we accept numbers to indicate option selection\n opt_int = int(opt)\n if not (1 <= opt_int <= len(choices)): \n return text, 'Answer %s must be between 1 and %s' % (opt_int, len(choices))\n else:\n new_answers[idx] = str(opt_int)\n\n except ValueError: \n # in the case where we accept the actual text of the question\n logger.debug('Caught value error, trying to parse answer string choice of: %s' % choices)\n if opt.lower() not in choices:\n return text, 'Answer must be one of the choices'\n else:\n new_answers[idx] = str(choices.index(opt.lower()) + 1)\n return ' '.join(new_answers), None", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def _instance_complete(self, text, line, begidx, endidx, notstates):\n choices = []\n for x in self._instancelist():\n if x[1] not in notstates:\n choices.append(x[0])\n\n matches = []\n for x in choices:\n if x.startswith(text):\n matches.append(x)\n return matches", "def is_valid(text):\n return is_all_word_segment_in_text(WORDS, text)", "def solution1(inp):\n rules, _, nearby = inp.strip().split(\"\\n\\n\")\n rules = rules.split(\"\\n\")\n nearby = nearby.split(\"\\n\")[1:]\n\n rrules = []\n for rule in rules:\n a, b = rule.split(\" or \")\n r1 = a.strip().split(\" \")[-1]\n r2 = b.strip()\n def to_range(r):\n i, j = list(map(int, r.split(\"-\")))\n return range(i, j + 1)\n rrules.append((to_range(r1), to_range(r2)))\n\n s = 0\n for ticket in nearby:\n ticket = list(map(int, ticket.split(\",\")))\n for v in ticket:\n valid = False\n for r in rrules:\n valid |= v in r[0] or v in r[1]\n if not valid:\n s += v\n return s", "def complete_set(self, text, line, begidx, endidx):\n # text = line[begidx:endidx] is the word we want to complete\n # split the completed words, should either be ['set'], or ['set', <option_key>]\n split_line = line[:begidx].split()\n if len(split_line) == 1:\n return [option for option in self.get_option_names() if option.startswith(text) or '.' + text in option]\n\n if len(split_line) == 2:\n key = split_line[1]\n options = self.get_options(key)\n if options is not None:\n scoped_key = key.split('.')[1] if '.' 
in key else key\n values = options.get_acceptable_values(scoped_key)\n if values is not None:\n return [value for value in values if value.startswith(text)]\n\n return []", "def IsValid(self):\n return (TickerFull.DelimiterSplit not in self.Text) and (TickerDataType.DelimiterData in self.Text)", "def test(s, approach):\n s_split = s.split()\n parsed_s = nlp(s)\n for i in xrange(len(parsed_s)):\n if parsed_s[i].tag_ == \"VBZ\":\n if approach(s, i) == 1:\n print str(1) + \":\", \" \".join(s_split[:i]), \\\n \"[{}=>{}]\".format(s_split[i], transform[s_split[i]]), \\\n \" \".join(s_split[i + 1:]) + \"\\t({} {})\".format(parsed_s[i], parsed_s[i].tag_)\n else:\n print str(0) + \":\", s + \"\\t({} {})\".format(parsed_s[i], parsed_s[i].tag_)", "def pre_validate(self, form):\n for item1,item2 in self.choices:\n if isinstance(item2, (list, tuple)):\n group_label = item1\n group_items = item2\n for val,label in group_items:\n if val == self.data:\n return\n else:\n val = item1\n label = item2\n if val == self.data:\n return\n raise ValueError(self.gettext('Not a valid choice!'))", "def main_completer_handler(self, text, state):\r\n response = None\r\n all_equals = []\r\n value = False\r\n equals = []\r\n\r\n # Build match list on first iteration else continue\r\n if state == 0:\r\n origline = readline.get_line_buffer()\r\n begin = readline.get_begidx()\r\n end = readline.get_endidx()\r\n being_completed = origline[begin:end]\r\n words = origline.split()\r\n\r\n if not words:\r\n # option for words list\r\n self.current_candidates = sorted(self.options.keys())\r\n else:\r\n # traverse all words entries and passing accordingly\r\n try:\r\n if begin == 0:\r\n # first word\r\n candidates = list(self.options.keys())\r\n else:\r\n # later word\r\n if '=' in words[len(words)-1] and len(words) > 1:\r\n #use possible values as candidates\r\n value = True\r\n equals = words[len(words)-1].split('=')\r\n if equals[1]:\r\n all_equals = [i.split('=') for i in words if '=' in i]\r\n\r\n if len(all_equals) > 1 and not all_equals[-2]\\\r\n [0] == all_equals[-1][0]and self.val_pos > 1:\r\n #reset candidates if new item\r\n candidates = []\r\n else:\r\n candidates = self.options[\"val\"]\r\n else:\r\n #use properties as candidates\r\n first = words[0]\r\n candidates = self.options[first]\r\n else:\r\n #use command items as candidates\r\n first = words[0]\r\n candidates = self.options[first]\r\n self.possible_vals = []\r\n if being_completed or equals:\r\n #possible value being_completed\r\n if equals:\r\n if equals[1] and not equals[1] in candidates:\r\n #match value\r\n being_completed = equals[1]\r\n else:\r\n #match property\r\n being_completed = equals[0]\r\n # match options with portion of input being completed\r\n self.current_candidates = [w for w in candidates\\\r\n if w and w.lower().startswith(being_completed.lower())]\r\n\r\n # return possible vals\r\n self.possible_vals = []\r\n if len(self.current_candidates) == 1 and 'set' in words[0] or equals:\r\n # second tab, return vals\r\n if being_completed == self.current_candidates[0]:\r\n #grab possible values\r\n for item in self.options['infovals']:\r\n if being_completed == item:\r\n val = self.options['infovals'][item]\r\n try:\r\n if 'Enumeration' in val['Type']:\r\n self.possible_vals = \\\r\n [v['ValueName'] for v in val['Value']]\r\n except:\r\n if 'boolean' in val['type']:\r\n self.possible_vals = [w for w in ['True', 'False']]\r\n elif 'string' in val['type']:\r\n self.possible_vals = [w for w \\\r\n in val['enum'] if w is not None]\r\n\r\n if 
self.possible_vals and 'null' \\\r\n in val['type']:\r\n self.possible_vals.append('None')\r\n break\r\n if self.possible_vals:\r\n self.options[\"val\"] = self.possible_vals\r\n self.val_pos = 0\r\n # first tab, complete\r\n else:\r\n self.possible_vals.append(self.current_candidates[0])\r\n self.val_pos += 1\r\n else:\r\n # matching empty string so use all candidates\r\n self.current_candidates = candidates\r\n\r\n except (KeyError, IndexError):\r\n self.current_candidates = []\r\n\r\n # Return the state from the match list if found otherwise return None.\r\n try:\r\n if self.possible_vals:\r\n response = self.possible_vals[state]\r\n else:\r\n response = self.current_candidates[state]\r\n except:\r\n # No candidate found for state\r\n response = None\r\n\r\n # Response return\r\n return response", "def check(self, text):\n p = self.d\n i = 0\n j = 0\n result = []\n ln = len(text)\n while i + j < ln:\n t = text[i + j].lower()\n # print i,j,hex(ord(t))\n if not (t in p):\n j = 0\n i += 1\n p = self.d\n continue\n p = p[t]\n j += 1\n # print p,i,j\n if chr(11) in p:\n p = self.d\n result.append(text[i:i + j])\n i = i + j\n j = 0\n return result", "def test_bug1591450(self):\n input = \"\"\"Testing <i>markup</i> and {y:i}so-forth...leading dots and trail--- well, you get-the-point. Also check numbers: 999 1,000 12:00 .45. Done?\"\"\"\n output = [\n (\"Testing\", 0), (\"i\", 9), (\"markup\", 11), (\"i\", 19), (\"and\", 22),\n (\"y\", 27), (\"i\", 29), (\"so\", 31), (\"forth\", 34), (\"leading\", 42),\n (\"dots\", 50), (\"and\", 55), (\"trail\", 59), (\"well\", 68),\n (\"you\", 74), (\"get\", 78), (\"the\", 82), (\"point\", 86),\n (\"Also\", 93), (\"check\", 98), (\"numbers\", 104), (\"Done\", 134),\n ]\n for (itmO, itmV) in zip(output, tokenize_en(input)):\n self.assertEqual(itmO, itmV)", "def create_word_list(self):\n return set(self.split(self.title)+self.split(self.conditions)+self.split(self.interventions))", "def priority_split(text, *splitters):\n present = [s for s in splitters if s in text]\n # fall back to non-present splitter; ensures we have a splitter\n splitters = present + list(splitters)\n splitter = splitters[0]\n return [seg.strip() for seg in text.split(splitter) if seg.strip()]", "def split_into_phrases (self, phrase):\r\n\r\n if not self.contains(phrase,'()'):\r\n\r\n #For a phrase without parantheses\r\n \r\n\r\n if '|' in phrase:\r\n return ['@']+[x for x in phrase.split('|')]\r\n elif '&' in phrase:\r\n return [x for x in phrase.split('&')]\r\n\r\n #If the phrase contains parantheses.\r\n \r\n phrase = list (phrase)\r\n #convert string into a list of chars\r\n level = 0\r\n found = False # if one of the operators is found in the phrase \r\n\r\n for operator in ['#','>','|','&']:\r\n level = 0 # reset level\r\n if not found:\r\n \r\n \r\n for x,char in enumerate(phrase):\r\n if char == '(':\r\n level += 1\r\n if char == ')':\r\n level -=1\r\n # level indicates level within hierarchy established by parantheses\r\n\r\n if level == 0 and x+1 < len(phrase) and phrase[x+1] == operator:\r\n phrase[x+1] = '<<'+operator+'>>'\r\n found = True\r\n break\r\n \r\n \r\n\r\n if '<<&>>' in phrase:\r\n # For AND\r\n phrases = ''.join(phrase).split('<<&>>')\r\n elif '<<|>>' in phrase:\r\n # For OR \r\n phrases = ['@']+''.join(phrase).split('<<|>>')\r\n elif '<<>>>' in phrase:\r\n # For INFERENCE \r\n premise = ''.join(phrase).split('<<>>>')[0]\r\n conclusion = ''.join(phrase).split('<<>>>')[1]\r\n phrases = ['@','~'+premise,conclusion]\r\n # A => B translated as ~A OR B\r\n elif '<<#>>' 
in phrase:\r\n # FOR EQUIVALENCY \r\n premise = ''.join(phrase).split('<<#>>')[0]\r\n conclusion = ''.join(phrase).split('<<#>>')[1]\r\n \r\n phrase1 = '~'+'('+premise+'&'+'~'+conclusion+')'\r\n phrase2 = '~'+'('+conclusion+'&'+'~'+premise+')'\r\n phrases = [phrase1,phrase2]\r\n # A<>B translated as (~A or B) & (~B or A) \r\n \r\n return [x for x in phrases]" ]
[ "0.58024824", "0.5749306", "0.5671507", "0.55511814", "0.5483213", "0.5446786", "0.54443884", "0.54241514", "0.54228777", "0.52902573", "0.52757835", "0.5264248", "0.5196836", "0.5174921", "0.5168787", "0.5146945", "0.51274663", "0.5122783", "0.5096883", "0.509225", "0.5079589", "0.5070989", "0.50292575", "0.49920854", "0.49731225", "0.4968269", "0.4955272", "0.49335572", "0.49249843", "0.4910313" ]
0.7824299
0
Return a list of choices for setting the init default
def initDefaultChoices(self): return [entry[0] for entry in self.getEditChoices()]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initDefaultChoices(self):\n return []", "def initDefaultChoices(self):\n return [text for text in self.formatList]", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, DateFormat.dateStampStrings[1])\n return choices", "def initDefaultChoices(self):\n choices = [entry[0] for entry in self.getEditChoices()]\n choices.insert(0, TimeFormat.timeStampStrings[1])\n return choices", "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self):\n raise NotImplementedError()", "def get_choices(self, instance):\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [\n x.value\n for x in instance.get_typed_parameter().get_available_choices()\n ]\n else:\n return None", "def choices(self):\n return self.__class__.get_setting_choices(self.key, **self.get_kwargs())", "def choices() -> List[str]:\n return [t.name.upper() for t in ConfigurationVariable]", "def choices(self) -> list:\n return [self.mapper(i) for i in self.keys()]", "def as_choices():\n return (\n # Live is currently disabled as a choice\n # pending implementation\n (\"live\", \"Use working directory\"),\n (\"latest\", \"Use latest snapshot\"),\n (\"pinned\", \"Pinned to snapshot\"),\n )", "def choices(self):\n # Needs to be implmented by subclass\n raise Exception(NotImplemented)", "def choices(cls):\n _choices = []\n for attr in _user_attributes(cls):\n val = getattr(cls, attr)\n setattr(cls, attr[1:], val[0])\n _choices.append((val[0], val[1]))\n setattr(cls, 'CHOICES', tuple(_choices))\n return cls", "def valid_options(self):\n choices = self.choices()\n\n if not choices:\n return None\n\n return [opt[0] for opt in choices]", "def choices(cls):\n # return list(map(tuple, cls.__members__.items()))\n return [(int(code), name) for name, code in cls.__members__.items()]", "def choices(self):\n\n if self._choices == None:\n self._choices = [ExperimentChoice(self, choice_name) for choice_name in self.choice_names]\n\n return self._choices", "def get_choices(cls):\n return cls.values.items()", "def __init__(self,choices,caption='ListSelection',default=[],single=False,check=False,sort=False,*args,**kargs):\n InputDialog.__init__(self,caption=caption,items = [\n dict(name='input',value=default,itemtype='list',choices=choices,\n text='',single=single,check=check,sort=sort,*args,**kargs),\n ],)", "def choices(self):\n return tuple(self._choices)", "def get_choices_for_model_field(cls):\n return [c[0:2] for c in cls.attr_choices]", "def multiple_choice():\n\n return [\"MAR\", \"MAR\", \"NI\", \"NI\", \"MCAR\"]", "def choices(cls):\n return tuple(item.as_tuple for item in list(cls))", "def as_choices(cls, key_type=None):\n if key_type is None:\n key_type = cls.get_default_choice_type()\n return cls.enum_class.as_choices(key_type)", "def _set_default_suits(self):\n # set up suits\n suit_types = [('Spades', 1), ('Hearts', 2), ('Diamonds', 3), ('Clubs', 4)]\n # populate the list of suits\n suit_list = list()\n for s in suit_types:\n suit_list.append(Suit(s[0], s[1]))\n\n return suit_list", "def choices(self) -> list:\n return [self.mapper(item) for item in self]", "def _resolve_defaults(self, **kwargs):\n res = list()\n for name, value in kwargs.items():\n if value is None:\n value = self.default(name)\n if value is None:\n raise RuntimeError(f\"Missing default {name}\")\n res.append(value)\n return res", "def __init__(self, *args, **kwargs):\n super(ChoiceFieldType, self).__init__(*args, **kwargs)\n\n self.choices = self.get_field_info_key('choices')", "def 
get_setting_choices(cls, key, **kwargs):\n setting = cls.get_setting_definition(key, **kwargs)\n\n choices = setting.get('choices', None)\n\n if callable(choices):\n # Evaluate the function (we expect it will return a list of tuples...)\n return choices()\n\n return choices", "def initDefaults(self):\n return _libsbml.Parameter_initDefaults(self)", "def get_template_base_dir_choices() -> list[tuple[str, str]]:\n # handle predefined choices\n choices, seen = [], set()\n for template_name in TemplateName:\n choices.append((template_name.value, template_name.label))\n seen.add(template_name.value)\n\n # handle custom choices via settings\n for template_name, display_name in getattr(settings, \"CAST_CUSTOM_THEMES\", []):\n if template_name not in seen:\n choices.append((template_name, display_name))\n seen.add(template_name)\n\n # search for template base directories\n template_directories = get_template_directories()\n template_base_dir_candidates = get_template_base_dir_candidates(template_directories)\n for candidate in template_base_dir_candidates:\n if candidate not in seen:\n choices.append((candidate, candidate))\n\n return choices" ]
[ "0.8790614", "0.8089398", "0.7564882", "0.74508727", "0.70026475", "0.70026475", "0.680855", "0.67124087", "0.6624441", "0.65727", "0.653437", "0.64892524", "0.6431678", "0.6409585", "0.63181144", "0.6240514", "0.6240365", "0.62128824", "0.6163351", "0.6163266", "0.6090951", "0.6066959", "0.6054693", "0.60396624", "0.60294706", "0.60005", "0.5994685", "0.59767413", "0.5959246", "0.592687" ]
0.83091384
1
Sort menu list choices
def sortChoices(self): self.formatList.sort()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SortList(self, key: callable = str.lower):\n temp_list = self.Items\n temp_list.sort(key=key)\n # delete contents of present listbox\n self.delete(0, Tags.End.value)\n # load listbox with sorted data\n for item in temp_list:\n self.insert(Tags.End.value, item)", "def main():\n seq = [48, 11, 45, 92, 32, 61, 65, 57, 29, 96]\n print(selection_sort(seq))", "def application_command_autocomplete_choice_sort_key(choice):\n return choice['name']", "def get_menu_items(self) -> List[str]:\n return sorted(self._items()) # return a copy", "def on_combo_sort_col_names_currentIndexChanged(self, index):\n if self.ui.sort_radio_asc.isChecked():\n self.model.setSort(index, Qt.AscendingOrder)\n else:\n self.model.setSort(index, Qt.DescendingOrder)\n self.model.select()", "def sortedChoices(self, inText):\n choices = self.splitText(inText)\n sortedChoices = [text for text in self.formatList if text in choices]\n if len(choices) == len(sortedChoices):\n return (sortedChoices, True)\n else:\n return (sortedChoices, False)", "def make_act_decision(self, decision):\n choices = decision.choices()\n choices.sort(key=lambda x: self.act_priority(decision, x))\n return choices[-1]", "def select_sort_method():\n st.sidebar.markdown('### Sort method:')\n sort_select = st.sidebar.selectbox('', ['Alphabetically', 'FTE Salary'],\n index=1)\n return sort_select", "def sort(self):\n for section, section_items in self.items():\n if sorted(section_items) == list(section_items):\n continue\n\n section_dict = {k: v for k, v in section_items.items()}\n\n for k in list(section_items):\n self.remove_option(section, k)\n\n for k, v in sorted(section_dict.items()):\n self.set(section, k, v)", "def input_user_choice_sorting(self):\r\n try:\r\n user_choice = input(\"Classer par\\n Ordre alphabétique (entrez '1')\\n Classement ELO (entrez '2')\\n\")\r\n if user_choice == '1' or user_choice == '2':\r\n return user_choice\r\n else:\r\n raise ValueError\r\n except ValueError:\r\n print(\"Veuillez choisir 1 ou 2\")\r\n return self.input_user_choice_sorting()", "def sort_by_type(self):\n # sort_by_type_sitem = self.locator_finder_by_idx(self.sort_by_type_id, 30)\n # sort_by_type_sitem = sort_by_type_sitem.find_element_by_xpath(\"./..\")\n # while True:\n # try:\n # sort_by_type_sitem.click()\n # break\n # except ElementNotInteractableException:\n # time.sleep(1) \n if self.current_package_version() == semver.VersionInfo.parse(\"3.8.0\"):\n sort_by_type = '//*[@id=\"collectionsDropdown\"]/ul[3]/li[3]/a/label'\n sort_by_type_sitem = self.locator_finder_by_xpath(sort_by_type)\n else:\n sort_by_type_sitem = self.locator_finder_by_xpath(self.sort_by_type_id)\n\n sort_by_type_sitem.click()\n time.sleep(2)", "def sortby(self):\n ...", "def sort_options(command):\n command.params.sort(key=lambda p: p.name)\n return command", "def __editSortSelectedLines(self):\n editor = self.activeWindow()\n if editor:\n editor.sortLines()", "def sort_names(li, by_which):\n \n if by_which == 'first':\n li.sort(key = Name.first)\n elif by_which == 'last':\n li.sort(key = Name.last)", "def order_queryset_by_sort_order(get, qs):\n\n def get_string_from_tuple_list(lstTuples, number):\n \"\"\"Get the string value corresponding to a number in a list of number-string tuples\"\"\"\n sBack = [tup[1] for tup in lstTuples if tup[0] == number]\n return sBack\n\n # Helper: order a queryset on field [sOrder], which is a number from a list of tuples named [sListName]\n def order_queryset_by_tuple_list(qs, sOrder, sListName):\n \"\"\"Order a queryset on field [sOrder], 
which is a number from a list of tuples named [sListName]\"\"\"\n\n # Get a list of tuples for this sort-order\n tpList = build_choice_list(sListName)\n # Determine sort order: ascending is default\n bReversed = False\n if (sOrder[0:1] == '-'):\n # A starting '-' sign means: descending order\n sOrder = sOrder[1:]\n bReversed = True\n\n # Order the list of tuples alphabetically\n # (NOTE: they are alphabetical from 'build_choice_list()', except for the values 0,1)\n tpList = sorted(tpList, key=operator.itemgetter(1))\n # Order by the string-values in the tuple list\n return sorted(qs, key=lambda x: get_string_from_tuple_list(tpList, getattr(x, sOrder)), reverse=bReversed)\n\n # Set the default sort order\n sOrder = 'woord' # Default sort order if nothing is specified\n # See if the form contains any sort-order information\n if ('sortOrder' in get and get['sortOrder'] != ''):\n # Take the user-indicated sort order\n sOrder = get['sortOrder']\n\n # The ordering method depends on the kind of field:\n # (1) text fields are ordered straightforwardly\n # (2) fields made from a choice_list need special treatment\n if (sOrder.endswith('handedness')):\n ordered = order_queryset_by_tuple_list(qs, sOrder, \"Handedness\")\n elif (sOrder.endswith('domhndsh') or sOrder.endswith('subhndsh')):\n ordered = order_queryset_by_tuple_list(qs, sOrder, \"Handshape\")\n elif (sOrder.endswith('locprim')):\n ordered = order_queryset_by_tuple_list(qs, sOrder, \"Location\")\n else:\n # Use straightforward ordering on field [sOrder]\n ordered = qs.order_by(sOrder)\n\n # return the ordered list\n return ordered", "def change_sort(self, sorting_choice):\r\n self.message = \"place have been sorted by: {}\".format(sorting_choice)\r\n self.place_list.sort(sorting_choice)\r\n self.root.ids.entriesBox.clear_widgets()\r\n self.create_widget()\r\n sort_index = self.sort_choices.index(sorting_choice)\r\n self.current_sort = self.sort_choices[sort_index]", "def display_actor_list(self):\r\n actor_list = list()\r\n for actor in players_table:\r\n actor_list.append(actor)\r\n user_choice = self.input_user_choice_sorting()\r\n print(\"Liste de tous les acteurs: \")\r\n if user_choice == '1':\r\n actor_list.sort(key=lambda x: x['Nom'])\r\n for player in actor_list:\r\n print(player)\r\n elif user_choice == '2':\r\n actor_list.sort(reverse=True, key=lambda x: x['ELO'])\r\n for player in actor_list:\r\n print(player)", "def test_sort(self):\n sort_field = MoveSearchForm.sort\n for value, label in sort_field.kwargs['choices']:\n response = self.do_search(id=u'1', sort=value)\n self.assert_(\n response.tmpl_context.results,\n \"\"\"Sort by {0} doesn't crash\"\"\".format(value)\n )", "def choose_sort_key(self):\n\n global st_sort_key\n global st_reverse_sort\n\n sort_choices = [\n ('*Reverse*', None),\n ('Company name', stock.stock_key_name),\n ('Symbol', stock.stock_key_symb),\n ('Price', stock.stock_key_price),\n ('Change', stock.stock_key_change),\n ('Change percent', stock.stock_key_change_percent)\n ]\n\n self.lock.acquire()\n self.clear_main()\n w = self.windows['MAIN']\n line = 1\n\n for choice, func in sort_choices:\n w.addstr(line, 0, '%2d' % line, curses.A_BOLD | curses.color_pair(1))\n w.addstr(line, 3, choice)\n line += 1\n\n self.refresh()\n\n # Wait for the user to give is a key.\n while True:\n c = self.stdscr.getch()\n\n if c < ord('1') and c > ord('9'):\n continue\n\n index = c - ord('1')\n\n if index < len(sort_choices):\n break\n\n self.lock.release()\n\n # Set the new sort function.\n if index == 0:\n st_reverse_sort = 
not st_reverse_sort\n else:\n _, st_sort_key = sort_choices[index]\n\n self.lock.acquire()\n self.display_portfolio(self.active_portfolio)\n self.lock.release()", "def sort_by_name(self):\n # sort_by_name_sitem = self.locator_finder_by_idx(self.sort_by_name_id)\n # sort_by_name_sitem = sort_by_name_sitem.find_element_by_xpath(\"./..\")\n # while True:\n # try:\n # sort_by_name_sitem.click()\n # break\n # except ElementNotInteractableException:\n # time.sleep(1)\n \n if self.current_package_version() == semver.VersionInfo.parse(\"3.8.0\"):\n name = '//*[@id=\"collectionsDropdown\"]/ul[3]/li[2]/a/label'\n sort_by_name_sitem = self.locator_finder_by_xpath(name)\n else:\n sort_by_name_sitem = self.locator_finder_by_xpath(self.sort_by_name_id)\n sort_by_name_sitem.click()\n time.sleep(2)", "def selection_sort(lista):\n for index in range(0, len(lista)):\n min_index = index\n\n for right in range(index + 1, len(lista)):\n if lista[right] < lista[min_index]:\n min_index = right\n\n lista[index], lista[min_index] = lista[min_index], lista[index]", "def add_arguments(self, actions):\n actions = sorted(actions, key=attrgetter('option_strings'))\n super(SortingHelpFormatter, self).add_arguments(actions)", "def sort(self, args):\n if not args:\n self.err_print('One argument required')\n return\n\n _key = args[0]\n cur = self.ui.leftwin.highlighted().data\n try:\n ind = song.tags.index(_key)\n cur.change_sort(ind)\n self.ui.rightwin.disp()\n except:\n self.err_print('\"{}\" is not a valid key to sort by'.format(_key))", "def get_all_menu():", "def sorter(Plugin):\n return Plugin.order", "def selection_sort(l):\n walk = 0\n while walk < len(l):\n i = walk\n while i < len(l):\n if l[i] < l[walk]:\n # swap i and walk\n tmp = l[walk]\n l[walk] = l[i]\n l[i] = tmp\n i += 1\n walk += 1\n return", "def selSort(L):\n\tfor i in range(len(L) - 1):\n\t\tminIndx = i\n\t\tminVal= L[i]\n\t\tj = i + 1\n\t\twhile j < len(L):\n\t\t\tif minVal > L[j]:\n\t\t\tminIndx = j\n\t\t\tminVal= L[j]\n\t\t\tj += 1\n\t\ttemp = L[i]\n\t\tL[i] = L[minIndx]\n\t\tL[minIndx] = temp", "def sorted_options(sort_options):\n return [\n {\n \"title\": v[\"title\"],\n \"value\": (\n \"-{0}\".format(k)\n if v.get(\"default_order\", \"asc\") == \"desc\"\n else k\n ),\n }\n for k, v in sorted(\n sort_options.items(), key=lambda x: x[1].get(\"order\", 0)\n )\n ]", "def test_categories_are_sorted(self):\n self.data_sorted(self.test_data['shirts'], self.test_data['pants'])" ]
[ "0.6233976", "0.6216146", "0.6190673", "0.598364", "0.59764874", "0.5956966", "0.59059286", "0.5865083", "0.58410734", "0.58394575", "0.5783629", "0.5779133", "0.5769487", "0.57629895", "0.5747787", "0.57295066", "0.57123274", "0.5697192", "0.5693883", "0.567952", "0.56586474", "0.5655346", "0.5652303", "0.5623778", "0.5623775", "0.5617385", "0.56035554", "0.56031", "0.5595827", "0.55749387" ]
0.77374196
0
Set initial value from editor version using edit format option
def setInitDefault(self, editText): if editText in DateFormat.dateStampStrings: self.initDefault = DateFormat.dateStampStrings[0] else: TextFormat.setInitDefault(self, editText)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEditInitDefault(self):\n if self.initDefault in DateFormat.dateStampStrings:\n return DateFormat.dateStampStrings[1]\n return TextFormat.getEditInitDefault(self)", "def getEditInitDefault(self):\n return self.formatEditText(self.initDefault)[0]", "def setInitDefault(self, editText):\n self.initDefault = self.storedText(editText)[0]", "def getEditInitDefault(self):\n if self.initDefault in TimeFormat.timeStampStrings:\n return TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)", "def setInitDefault(self, editText):\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def on_editor_save(self):\n self.text = self.textWidget.get(\"1.0\", tk.END)", "def setModelData(self, editor, model, index):\n try:\n date = datetime.strptime(str(editor.text()), self.format)\n model.setData(index, date, Qt.EditRole)\n except:\n pass # If the text does not conform to the date format, do nothing.", "def on_edit_changed(self, edit):\n\t\tself.emit('value-changed', edit.get_text())", "def initFormat(self):\n pass", "def update_editor ( self ):\n font = self.factory.to_wx_font( self )\n try:\n self._facename.SetStringSelection( font.GetFaceName() )\n except:\n self._facename.SetSelection( 0 )\n try:\n self._point_size.SetStringSelection( str( font.GetPointSize() ) )\n except:\n self._point_size.SetSelection( 0 )\n font.SetPointSize( min( 10, font.GetPointSize() ) )\n self._font.SetValue( self.str_value )\n self._font.SetFont( font )", "def set_format(cls,format):\n import __main__\n IP = __main__.__dict__['__IP']\n prompt = getattr(IP.outputcache,cls._prompt)\n prompt.p_template = format\n prompt.set_p_str()\n cls._format = format", "def _set_settings_version(c, settings_path, version_line):\n version_const = \"VERSION\"\n\n print(f\"Adjusting {version_const} in {settings_path} to {version_line}...\")\n c.run(f'sed -i .orig \\'s/^{version_const} =.*$/{version_const} = \"{version_line}\"/\\' \"{settings_path}\"')", "def readVersion(self):\n ds = self.root.findall(\"[@format]\")[0]\n raw_format = ds.attrib['format']\n try:\n self.documentFormatVersion = int(raw_format)\n except ValueError:\n # as of fontTools >= 3.27 'format' is formatted as a float \"4.0\"\n self.documentFormatVersion = float(raw_format)", "def setValue(self,val):\n if self._plain:\n self.input.setPlainText(str(val))\n else:\n updateText(self.input,str(val))", "def defaultLoad (self):\n self.srcEditor.setText( \"\" )\n self.srcEditor.setFocus()\n self.setReadOnly( readOnly=False )", "def createEditor(self, parent, option, index):\n editor = QLineEdit(parent)\n date = index.model().data(index, Qt.DisplayRole)\n editor.setText(date.strftime(self.format))\n return editor", "def testSetEditorValue(self):\r\n \r\n lineEdit = QtGui.QLineEdit()\r\n self._editorFactory.setEditorValue(lineEdit, u\"Test\")\r\n self.assertTrue(lineEdit.text() == u\"Test\" )\r\n \r\n spinBox = QtGui.QDoubleSpinBox()\r\n self._editorFactory.setEditorValue(spinBox, 2.05)\r\n self.assertTrue(spinBox.value() == 2.05)\r\n \r\n checkBox = QtGui.QCheckBox()\r\n self._editorFactory.setEditorValue(checkBox, True)\r\n self.assertTrue(checkBox.isChecked() == True)", "def edit():", "def setValue(self,val):\n val = str(val)\n if self._plain:\n self.input.setText(val)\n else:\n updateText(self.input,val)", "def set_initial(self, value):\n # TODO: Make an Initial Stock Adjust here\n pass", "def setContentData(self, content):\n original = content\n if 
IVersionedObject.providedBy(original):\n content = original.get_editable()\n if content is None:\n self.widgetFactoryFactory = SMIDisplayWidgetFactory\n content = original.get_previewable()\n\n super(SMIEditForm, self).setContentData(content)", "def initFormat(self):\n self.html = True", "def initFormat(self):\n self.html = True", "def setValue(self,val):\n val = int(val)\n self.input.setText(str(val))", "def after_init(self):\n if self.options.format.appended:\n self.error_format = self.options.format.appended[0]", "def setEditorData(self, ledit, midx):\n cond = self._sel.give_cond(midx.row())\n val = cond[midx.column()]\n txt = \"\"\n if val is not None:\n txt = str(val)\n ledit.setText(txt)", "def setEditorData(self, ledit, midx):\n cond = self._sel.give_cond(midx.row())\n val = cond[midx.column()]\n txt = \"\"\n if val is not None:\n txt = str(val)\n ledit.setText(txt)", "def edition(self, key, value):\n return clean_val(\"a\", value, str).replace(\"ed.\", \"\")", "def on_widget_edited(self, value): # this is a slot\n # note this is exactly the same as @value.setter...\n self.value = value", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)" ]
[ "0.62732863", "0.62497866", "0.62184155", "0.6095079", "0.6085302", "0.6077205", "0.6007599", "0.57824373", "0.5620094", "0.552909", "0.54974353", "0.5490214", "0.5446925", "0.54123217", "0.53948015", "0.5381172", "0.53762734", "0.53609794", "0.5341126", "0.5302158", "0.5288958", "0.5286798", "0.5286798", "0.5260249", "0.52585703", "0.5220923", "0.5220923", "0.5215406", "0.5184883", "0.5160404" ]
0.6298632
0
Return initial value in edit format, found in edit format option
def getEditInitDefault(self): if self.initDefault in DateFormat.dateStampStrings: return DateFormat.dateStampStrings[1] return TextFormat.getEditInitDefault(self)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getEditInitDefault(self):\n return self.formatEditText(self.initDefault)[0]", "def getEditInitDefault(self):\n if self.initDefault in TimeFormat.timeStampStrings:\n return TimeFormat.timeStampStrings[1]\n return TextFormat.getEditInitDefault(self)", "def format(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"format\")", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def setInitDefault(self, editText):\n self.initDefault = self.storedText(editText)[0]", "def setInitDefault(self, editText):\n if editText in DateFormat.dateStampStrings:\n self.initDefault = DateFormat.dateStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def setInitDefault(self, editText):\n if editText in TimeFormat.timeStampStrings:\n self.initDefault = TimeFormat.timeStampStrings[0]\n else:\n TextFormat.setInitDefault(self, editText)", "def on_edit_changed(self, edit):\n\t\tself.emit('value-changed', edit.get_text())", "def assign_format(self):\n if self.is_output or self.is_req_output:\n if self.pname in self.tool_data[self.tool_name]['output_fmt']:\n return self.tool_data[self.tool_name]['output_fmt'][self.pname]\n elif self.pname in self.gen_out_fmt:\n return self.gen_out_fmt[self.pname]\n elif self.is_input:\n if self.pname in self.tool_data[self.tool_name]['input_fmt']:\n print(self.tool_data[self.tool_name])\n return self.tool_data[self.tool_name]['input_fmt'][self.pname]\n elif self.pname in self.gen_in_fmt:\n return self.gen_in_fmt[self.pname]\n else:\n # Not sure yet what this will be used for, but I think we need it.\n return ''", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def retrieve_input():\r\n inputValue = simpleText.get(\"1.0\",\"end-1c\") #Our Variable\r\n #\"1.0\" = start from first character in the text widget\r\n #\"end-1c = delete the last character that Text creates every time\"\r\n return inputValue", "def get_initial(self):\n\t\treturn self.initial", "def storedText(self, editText):\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)", "def make_input(value):\n what = value[0]\n if what.startswith('not editable'):\n what = what.replace('not editable:', '').replace(\"'\", \"\\'\")\n if what.startswith('attribute:value:'):\n what = what.replace('attribute:value:', '').replace(\"'\", \"\\'\")\n return what", "def get_val_str(self):\n fmt_str = self.template.get_format_str()\n if self.val_obj is None:\n return \"\"\n elif fmt_str:\n return fmt_str % (self.val_obj.val)\n else:\n return str(self.val_obj.val)", "def get_opt_formatted(self, command):\n if \"formatted\" in self.command_dict[\"commands\"][command].keys():\n return self.command_dict[\"commands\"][command][\"formatted\"]\n else:\n return CommandDict.DEFAULT_OPT_FORMATTED", "def formatsrc(self):\n return self[\"formatsrc\"]", "def formatsrc(self):\n return self[\"formatsrc\"]", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)", "def get_initial(self):\n return self.initial", "def 
getInitDefault(self):\n if self.initDefault in DateFormat.dateStampStrings:\n return GenDate().dateStr()\n return TextFormat.getInitDefault(self)", "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def format(self):\n return self.getparam(\"FORMAT\")", "def format(self):\n return self.getparam(\"FORMAT\")", "def _getAlterToFormat(cls, alter):\n if alter == '':\n alter = ['', '']\n if isinstance(alter, str): # nothing to do if it is dict\n alter = ['', alter]\n return alter", "def value(self):\n return str(self.input.currentText())", "def presentation(self, value):\r\n return value", "def default_formatter(self, data):\n return data", "def get_format(self):\n return self._format[0]", "def initial_value(self):\n return self._initial_value" ]
[ "0.7327655", "0.6777391", "0.6163817", "0.5962609", "0.59190995", "0.5825621", "0.5639453", "0.55958575", "0.5588548", "0.55880916", "0.55728984", "0.5547174", "0.55372924", "0.5518307", "0.55125266", "0.54999983", "0.54888153", "0.54888153", "0.54887563", "0.5471209", "0.5466419", "0.545412", "0.54069316", "0.54069316", "0.53839904", "0.5377033", "0.53640187", "0.53620666", "0.5355793", "0.5349789" ]
0.69168735
1
Return conditional comparison value with realtime adjustments, used for date and time types' 'now' value
def adjustedCompareValue(self, value): if value.startswith('now'): return repr(GenDate()) return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def adjustedCompareValue(self, value):\n if value.startswith('now'):\n return repr(GenTime())\n return value", "def condition(self):\n HH = str(time.localtime().tm_hour)\n MM = str(time.localtime().tm_min)\n return eval(self._cond_str)", "def get_state_by_time(python_time):\n present = datetime.now()\n\n if python_time <= present:\n return 2\n else:\n return 1", "def now(self):\n return conditional_now() + self.timedelta(**self.now_shift_kwargs)", "def check(self, comparison, value, value_type, second_value=None):\n now = datetime.now()\n if value_type == \"WEEKDAY\":\n if comparison not in [\"NE\", \"E\", \"WEEKDAY\", \"WEEKEND\"]:\n raise Exception(f\"Comparison {comparison} \"\n \"not valid for WEEKDAY\")\n if comparison == \"E\":\n return now.weekday() == value\n elif comparison == \"NE\":\n return now.weekday() != value\n elif comparison == \"WEEKDAY\":\n return now.weekday() < 5 # ISO counts from 0\n else:\n return now.weekday() > 4 # so Sat,Sun are 5,6\n if value_type == \"DATE\":\n dt = datetime.strptime(value, DATE_FMT)\n dt = dt.date()\n now = now.date()\n elif value_type == \"TIME\":\n dt = datetime.strptime(value, TIME_FMT)\n dt = dt.time()\n now = now.time()\n else:\n dt = datetime.strptime(value, DATETIME_FMT)\n if comparison == \"LE\":\n return now <= dt\n elif comparison == \"E\":\n return now == dt\n elif comparison == \"GE\":\n return now >= dt\n # At this point, we're doing either IN or OUT, so read second time\n # format\n if value_type == \"DATE\":\n second = datetime.strptime(second_value, DATE_FMT)\n second = second.date()\n elif value_type == \"TIME\":\n second = datetime.strptime(second_value, TIME_FMT)\n second = second.time()\n else:\n second = datetime.strptime(second_value, DATETIME_FMT)\n if comparison == \"IN\":\n return now >= dt and now <= second\n elif comparison == \"OUT\":\n return now <= dt or now >= second", "def __gt__(self, other):\n return self.to_seconds() > other.to_seconds()", "def __gt__(self, other):\n return self.to_seconds() > other.to_seconds()", "def conditional_value(self) -> global___Expression.ConditionalOperator:", "def get(self):\n now = datetime.datetime.utcnow()\n if now > self.time_of_next_update:\n self._update_value()\n return self.value", "def set_when(day, today):\n if day < today:\n return \"past\"\n if day == today:\n return \"present\"\n return \"future\"", "def greater_than_or_equal(self) -> global___Expression:", "def __cmp__(self, other):\n return (self._cmp(self.seconds, other.seconds)\n or self._cmp(self.nanosecond, other.nanosecond))", "def __gt__(self, other):\n if self.date > other.date:\n return True\n else:\n return False", "def test_expression_dates(self):\n import datetime\n import time\n time1 = datetime.datetime.now()\n time.sleep(0.01)\n time2 = datetime.datetime.now()\n\n # Checks on a specified attribute with operators \"==\" and \"!=\" with integers\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at < time2)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.updated_at < time2 with models.Network.id=time1\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at > time2)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.updated_at < time2 with models.Network.id=time1\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at < time1)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time2}], 
[\"networks\"]))\n self.assertFalse(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at > time1)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time2}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at == time1)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at == time2)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")", "def less_than_or_equal(self) -> global___Expression:", "def current_time(cls) -> float:", "def next_update_in(self, now):\n # Never updated: NOW!\n if self.last_tested is None:\n return 0.0\n\n # Was updated\n seconds_ago = (now - self.last_tested).total_seconds()\n delay = self.real_period - seconds_ago\n return max(delay, 0.0) # don't allow it to be negative", "def after(v1,v2):\n return v1.time_left>v2.time_left", "def check_time_since_last_data(device_origin):\n actual_time = time.time()\n sec_since_last_data = actual_time - mon_item.read_device_status_values(device_origin)[1]\n min_since_last_data = sec_since_last_data / 60\n min_since_last_data = int(min_since_last_data)\n latest_data_hr = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(latest_data))\n return min_since_last_data", "def test_process_filter_value():\n now = dt.utcnow()\n now_ts = now.timestamp()\n filter_ = {'column': \"ts_created_at\", 'value': now_ts, type: 'leq'}\n assert process_filter_value(filter_) == now\n\n filter_ = {'column': \"created_at\", 'value': now_ts, type: 'leq'}\n assert process_filter_value(filter_) == now_ts", "def compare(x, y):\n if x >= y:\n return 1.0\n else:\n return 0.0", "def newer(a, b):\n\treturn modtime(a) < modtime(b) # smaller is earlier", "def test_larger_lhs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = datetime(2012, 9, 20, 3, 45)\n rhs = datetime(2012, 9, 20, 2, 45)\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(lhs, result)", "def test_get_current_time_is_constant() -> None:\n time_provider = TimeProvider()\n current_time_1 = time_provider.get_current_time()\n current_time_2 = time_provider.get_current_time()\n\n assert current_time_1 == current_time_2", "def _compare(self, value, target):\n result = getattr(self.reg, target) - value\n self.reg.N = result >> 7\n self.reg.C = getattr(self.reg, target) >= value\n self.reg.Z = result == 0", "def search_cond(ts):\n ts = convert(ts, mode='timestamp')\n at = [\"year\", \"month\", \"day\", \"hour\", \"minute\"]\n if all(getattr(ts, a) == getattr(upper_bound, a) for a in at):\n return 0\n elif ts < upper_bound:\n return -1\n elif ts > upper_bound:\n return 1", "def time_before(time_a, time_b=None) -> bool:\n if time_b is None:\n time_b = time_now()\n\n # make sure both times are floats\n time_a = float(date_to_epoch(time_a))\n time_b = float(date_to_epoch(time_b))\n return time_a < time_b", "def native_value(self) -> float:\n if (self.coordinator.data is None) or (self._last_updated is not None and \"last_updated\" in self.coordinator.data and self._last_updated > self.coordinator.data[\"last_updated\"]):\n 
self._attributes[\"last_updated_timestamp\"] = self._last_updated\n return self._state\n \n self._attributes[\"last_updated_timestamp\"] = self.coordinator.data[\"last_updated\"]\n self._state = self.coordinator.data[\"charge_limit_weekday\"]\n \n return self._state", "def less_than(self) -> global___Expression:", "def _comparison_function(comp, value=0.0, **kwargs):\n if comp == 'g' or comp == '>':\n func = np.greater\n elif comp == 'ge' or comp == '>=':\n func = np.greater_equal\n elif comp == 'l' or comp == '<':\n func = np.less\n elif comp == 'le' or comp == '<=':\n func = np.less_equal\n elif comp == 'e' or comp == '=' or comp == '==':\n func = np.equal\n elif comp == 'ne' or comp == '!=':\n func = np.not_equal\n else:\n raise ValueError(\"Unrecognized comparison '{}'.\".format(comp))\n\n def comp_func(xx):\n return func(xx, value, **kwargs)\n\n return comp_func" ]
[ "0.7139558", "0.61179876", "0.6032351", "0.60296464", "0.5851021", "0.5813862", "0.5813862", "0.58051044", "0.5691663", "0.56752634", "0.563707", "0.55923384", "0.5590049", "0.55772096", "0.5574569", "0.5568509", "0.5496361", "0.54885936", "0.54716676", "0.5465949", "0.5443612", "0.5395156", "0.536688", "0.5360831", "0.53564024", "0.53204775", "0.53169084", "0.5310457", "0.5301735", "0.5275172" ]
0.689713
1
Return conditional comparison value with realtime adjustments, used for date and time types' 'now' value
def adjustedCompareValue(self, value): if value.startswith('now'): return repr(GenTime()) return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def adjustedCompareValue(self, value):\n if value.startswith('now'):\n return repr(GenDate())\n return value", "def condition(self):\n HH = str(time.localtime().tm_hour)\n MM = str(time.localtime().tm_min)\n return eval(self._cond_str)", "def get_state_by_time(python_time):\n present = datetime.now()\n\n if python_time <= present:\n return 2\n else:\n return 1", "def now(self):\n return conditional_now() + self.timedelta(**self.now_shift_kwargs)", "def check(self, comparison, value, value_type, second_value=None):\n now = datetime.now()\n if value_type == \"WEEKDAY\":\n if comparison not in [\"NE\", \"E\", \"WEEKDAY\", \"WEEKEND\"]:\n raise Exception(f\"Comparison {comparison} \"\n \"not valid for WEEKDAY\")\n if comparison == \"E\":\n return now.weekday() == value\n elif comparison == \"NE\":\n return now.weekday() != value\n elif comparison == \"WEEKDAY\":\n return now.weekday() < 5 # ISO counts from 0\n else:\n return now.weekday() > 4 # so Sat,Sun are 5,6\n if value_type == \"DATE\":\n dt = datetime.strptime(value, DATE_FMT)\n dt = dt.date()\n now = now.date()\n elif value_type == \"TIME\":\n dt = datetime.strptime(value, TIME_FMT)\n dt = dt.time()\n now = now.time()\n else:\n dt = datetime.strptime(value, DATETIME_FMT)\n if comparison == \"LE\":\n return now <= dt\n elif comparison == \"E\":\n return now == dt\n elif comparison == \"GE\":\n return now >= dt\n # At this point, we're doing either IN or OUT, so read second time\n # format\n if value_type == \"DATE\":\n second = datetime.strptime(second_value, DATE_FMT)\n second = second.date()\n elif value_type == \"TIME\":\n second = datetime.strptime(second_value, TIME_FMT)\n second = second.time()\n else:\n second = datetime.strptime(second_value, DATETIME_FMT)\n if comparison == \"IN\":\n return now >= dt and now <= second\n elif comparison == \"OUT\":\n return now <= dt or now >= second", "def __gt__(self, other):\n return self.to_seconds() > other.to_seconds()", "def __gt__(self, other):\n return self.to_seconds() > other.to_seconds()", "def conditional_value(self) -> global___Expression.ConditionalOperator:", "def get(self):\n now = datetime.datetime.utcnow()\n if now > self.time_of_next_update:\n self._update_value()\n return self.value", "def set_when(day, today):\n if day < today:\n return \"past\"\n if day == today:\n return \"present\"\n return \"future\"", "def greater_than_or_equal(self) -> global___Expression:", "def __cmp__(self, other):\n return (self._cmp(self.seconds, other.seconds)\n or self._cmp(self.nanosecond, other.nanosecond))", "def __gt__(self, other):\n if self.date > other.date:\n return True\n else:\n return False", "def test_expression_dates(self):\n import datetime\n import time\n time1 = datetime.datetime.now()\n time.sleep(0.01)\n time2 = datetime.datetime.now()\n\n # Checks on a specified attribute with operators \"==\" and \"!=\" with integers\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at < time2)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.updated_at < time2 with models.Network.id=time1\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at > time2)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.updated_at < time2 with models.Network.id=time1\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at < time1)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time2}], 
[\"networks\"]))\n self.assertFalse(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at > time1)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time2}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at == time1)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertTrue(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")\n\n expression = BooleanExpression(\"NORMAL\", models.Network.updated_at == time2)\n value = expression.evaluate(KeyedTuple([{\"updated_at\": time1}], [\"networks\"]))\n self.assertFalse(value, \"models.Network.updated_at < time1 with models.Network.id=time2\")", "def less_than_or_equal(self) -> global___Expression:", "def current_time(cls) -> float:", "def next_update_in(self, now):\n # Never updated: NOW!\n if self.last_tested is None:\n return 0.0\n\n # Was updated\n seconds_ago = (now - self.last_tested).total_seconds()\n delay = self.real_period - seconds_ago\n return max(delay, 0.0) # don't allow it to be negative", "def after(v1,v2):\n return v1.time_left>v2.time_left", "def check_time_since_last_data(device_origin):\n actual_time = time.time()\n sec_since_last_data = actual_time - mon_item.read_device_status_values(device_origin)[1]\n min_since_last_data = sec_since_last_data / 60\n min_since_last_data = int(min_since_last_data)\n latest_data_hr = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(latest_data))\n return min_since_last_data", "def test_process_filter_value():\n now = dt.utcnow()\n now_ts = now.timestamp()\n filter_ = {'column': \"ts_created_at\", 'value': now_ts, type: 'leq'}\n assert process_filter_value(filter_) == now\n\n filter_ = {'column': \"created_at\", 'value': now_ts, type: 'leq'}\n assert process_filter_value(filter_) == now_ts", "def compare(x, y):\n if x >= y:\n return 1.0\n else:\n return 0.0", "def newer(a, b):\n\treturn modtime(a) < modtime(b) # smaller is earlier", "def test_larger_lhs(self):\n from sosbeacon.utils import get_latest_datetime\n\n lhs = datetime(2012, 9, 20, 3, 45)\n rhs = datetime(2012, 9, 20, 2, 45)\n\n result = get_latest_datetime(lhs, rhs)\n\n self.assertIs(lhs, result)", "def test_get_current_time_is_constant() -> None:\n time_provider = TimeProvider()\n current_time_1 = time_provider.get_current_time()\n current_time_2 = time_provider.get_current_time()\n\n assert current_time_1 == current_time_2", "def _compare(self, value, target):\n result = getattr(self.reg, target) - value\n self.reg.N = result >> 7\n self.reg.C = getattr(self.reg, target) >= value\n self.reg.Z = result == 0", "def search_cond(ts):\n ts = convert(ts, mode='timestamp')\n at = [\"year\", \"month\", \"day\", \"hour\", \"minute\"]\n if all(getattr(ts, a) == getattr(upper_bound, a) for a in at):\n return 0\n elif ts < upper_bound:\n return -1\n elif ts > upper_bound:\n return 1", "def time_before(time_a, time_b=None) -> bool:\n if time_b is None:\n time_b = time_now()\n\n # make sure both times are floats\n time_a = float(date_to_epoch(time_a))\n time_b = float(date_to_epoch(time_b))\n return time_a < time_b", "def native_value(self) -> float:\n if (self.coordinator.data is None) or (self._last_updated is not None and \"last_updated\" in self.coordinator.data and self._last_updated > self.coordinator.data[\"last_updated\"]):\n 
self._attributes[\"last_updated_timestamp\"] = self._last_updated\n return self._state\n \n self._attributes[\"last_updated_timestamp\"] = self.coordinator.data[\"last_updated\"]\n self._state = self.coordinator.data[\"charge_limit_weekday\"]\n \n return self._state", "def less_than(self) -> global___Expression:", "def _comparison_function(comp, value=0.0, **kwargs):\n if comp == 'g' or comp == '>':\n func = np.greater\n elif comp == 'ge' or comp == '>=':\n func = np.greater_equal\n elif comp == 'l' or comp == '<':\n func = np.less\n elif comp == 'le' or comp == '<=':\n func = np.less_equal\n elif comp == 'e' or comp == '=' or comp == '==':\n func = np.equal\n elif comp == 'ne' or comp == '!=':\n func = np.not_equal\n else:\n raise ValueError(\"Unrecognized comparison '{}'.\".format(comp))\n\n def comp_func(xx):\n return func(xx, value, **kwargs)\n\n return comp_func" ]
[ "0.68978775", "0.6119225", "0.6032928", "0.6031085", "0.5851345", "0.5813949", "0.5813949", "0.5806566", "0.56923884", "0.5676319", "0.5638131", "0.55917704", "0.55903226", "0.55771613", "0.55758727", "0.5568553", "0.54968023", "0.54886967", "0.54710984", "0.54660606", "0.544368", "0.53952295", "0.5366562", "0.5361186", "0.53564334", "0.53201556", "0.5318052", "0.5310881", "0.5303645", "0.52755153" ]
0.71401125
0
Return tuple of stored text from edited text and bool validity, using edit format option
def storedText(self, editText): try: return (repr(GenBoolean(editText)), True) except GenBooleanError: if editText in self.formatList: return (editText, True) return (editText, not editText and not self.isRequired)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def storedText(self, editText):\n if editText in self.formatList:\n return (editText, True)\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n return (storedText, True)", "def formatEditText(self, storedText):\n return (storedText, True)", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def formatEditText(self, storedText):\n for choice in self.splitText(storedText):\n if choice not in self.formatList:\n return (storedText, not storedText)\n return (storedText, True)", "def storedText(self, editText):\n if editText:\n return (editText, True)\n return (editText, not self.isRequired)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n if storedText in self.formatList:\n return (storedText, True)\n try:\n return (GenBoolean(storedText).boolStr(self.format), True)\n except GenBooleanError:\n return (storedText, not storedText)", "def storedText(self, editText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (repr(GenDate().setFromStr(editText, format)), True)\n except GenDateError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenNumber().setFromStr(editText, self.format)), True)\n except GenNumberError:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n choices, valid = self.sortedChoices(editText)\n if valid:\n return (self.editSep.join(choices), True)\n else:\n return (editText, not editText and not self.isRequired)", "def storedText(self, editText):\n try:\n return (repr(GenTime(editText)), True)\n except GenTimeError:\n return (editText, not editText and not self.isRequired)", "def formatEditText(self, storedText):\n try:\n return (GenNumber(storedText).numStr(self.format), True)\n except GenNumberError:\n return (storedText, not storedText)", "def storedText(self, editText):\n return (editText, editText or not self.isRequired)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditDateFormat', True)\n try:\n return (GenDate(storedText).dateStr(format), True)\n except GenDateError:\n return (storedText, not storedText)", "def formatEditText(self, storedText):\n format = globalref.options.strData('EditTimeFormat', True)\n try:\n return (GenTime(storedText).timeStr(format), True)\n except GenTimeError:\n return (storedText, not storedText)", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def edit(self, text):\n return self._edit_engine(text, break_on_success=False)", "def get_data_from_nonformat_text():\n pass", "def getEditChoices(self, currentText=''):\n return [(text, '') for text in self.formatList]", "def syntax_text():", "def on_edit(self, event, text):\n return None", "def process_output(self, text): # pylint: disable=no-self-use\n changed = False\n return changed, text", "def reformat(ctx):\n pass", "def main(text,result=\"latex\",check_text=True,index_project='',file_name=''): # TODO detect and make a warning for genuine latex marks\n if isinstance(text,str):\n text = text.split('\\n')\n \n if check_text:\n check(text)\n \n print(text)\n \n ### managing placeholders\n text = parsers['v'].main(text)\n \n ### saving names\n if 
index_project:\n indexer.parse(text,index_project,file_name)\n \n \n for i in range(len(text)):\n line = text[i]\n ### managing end of line\n line = line.replace(\" ,,\",\"\\\\\\\\\")\n \n while line.count(opening_mark):\n first_part, mark, late_part = line.partition(',;')\n if not late_part:\n break\n late_part, text = parsers[late_part[0]].main(late_part = late_part,\n text=text,\n result=result,\n line_nb = i)\n line = first_part + late_part\n text[i] = line\n \n return '\\n'.join(text)", "def analyze_text(words, format_bolds, format_italics, de_condition={'bold':1, 'italic':-1}, en_condition={'bold':0, 'italic':-1}, end_of_item_eol=False, eoi_bold_to_unbold=False, eoi_unbold_to_bold=True, eoi_italic_to_unitalic=False, eoi_unitalic_to_italic=False):\n table = []\n de_words = ['']\n en_words = ['']\n if de_condition['bold'] != -1: de_condition['bold'] = not(not(de_condition['bold'])) # convert integer to boolean\n if de_condition['italic'] != -1: de_condition['italic'] = not(not(de_condition['italic']))\n \n last_format_bold = False\n last_format_italic = False\n de_bold_ok = False\n de_italic_ok = False\n en_bold_ok = False\n en_italic_ok = False\n item_no = 0\n for k in range(len(words)):\n if format_bolds[k] == False:\n new_format_bold = False\n elif format_bolds[k] == True:\n new_format_bold = True\n if format_italics[k] == False:\n new_format_italic = False\n elif format_italics[k] == True:\n new_format_italic = True\n\n # Check condition for switching to the new item\n flag_new = True\n if item_no>0 and end_of_item_eol and words[k]!='\\n': flag_new = False\n if item_no>0 and eoi_bold_to_unbold and not(last_format_bold and not new_format_bold): flag_new = False\n if item_no>0 and eoi_unbold_to_bold and not(not last_format_bold and new_format_bold): flag_new = False\n if item_no>0 and eoi_italic_to_unitalic and not(last_format_italic and not new_format_italic): flag_new = False\n if item_no>0 and eoi_unitalic_to_italic and not(not last_format_italic and new_format_italic): flag_new = False\n last_format_bold = new_format_bold\n last_format_italic = new_format_italic\n if flag_new:\n de_words.append('')\n en_words.append('')\n item_no += 1\n \n if words[k]!='\\n': # also format_bolds[k] and format_italics[k] must be either True or False\n if de_condition['bold'] != -1:\n de_bold_ok = not de_condition['bold']^format_bolds[k] # use XOR operator\n else: de_bold_ok = True\n if de_condition['italic'] != -1:\n de_italic_ok = not de_condition['italic']^format_bolds[k]\n else: de_italic_ok = True\n if de_bold_ok and de_italic_ok: de_words[item_no] += words[k]\n \n if en_condition['bold'] != -1:\n en_bold_ok = not en_condition['bold']^format_bolds[k]\n else: en_bold_ok = True\n if en_condition['italic'] != -1:\n en_italic_ok = not en_condition['italic']^format_bolds[k]\n else: en_italic_ok = True\n if en_bold_ok and en_italic_ok: en_words[item_no] += words[k]\n else:\n # Check if this end-of-line should be converted to a space, to connect lines\n if de_words[item_no] != '' and de_bold_ok and de_italic_ok: de_words[item_no] += ' '\n if en_words[item_no] != '' and en_bold_ok and en_italic_ok: en_words[item_no] += ' ' \n \n return de_words, en_words" ]
[ "0.78716373", "0.75691116", "0.75691116", "0.7379154", "0.73117137", "0.7183602", "0.7152062", "0.7089976", "0.6903923", "0.6863199", "0.68065554", "0.6748621", "0.6604557", "0.62711895", "0.61224514", "0.6009547", "0.5690611", "0.5690611", "0.5690611", "0.5690611", "0.5690611", "0.5534457", "0.5529326", "0.55119324", "0.54897064", "0.54593766", "0.53941077", "0.53884834", "0.53541094", "0.5348279" ]
0.76830506
1
Return the next value for a new node, increment format if increment is True
def nextValue(self, increment=True): try: prefix, numText, suffix = UniqueIDFormat.formatRe.\ match(self.format).groups() except AttributeError: self.format = UniqueIDFormat.defaultFormat return self.nextValue(increment) value = self.format if increment: pattern = u'%%s%%0.%dd%%s' % len(numText) num = int(numText) + 1 self.format = pattern % (prefix, num, suffix) return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mutate(self, node, _):\n new_node = ast.Num(n=node.n + 1)\n return new_node", "def next(self):\n with self.atomicfile.locked():\n curr = self.atomicfile.read_atomic().decode(\"utf8\")\n curr = self.initial if not curr else int(curr)\n self.atomicfile.write_atomic(str(curr + 1).encode(\"utf8\"))\n return curr", "def next(self) -> int:\n self.index += 1\n return self.nodes_sorted[self.index]", "def get_next(self) -> float:\n return self._current + self._offset", "def next(self) -> int:\n node = self.list.pop()\n t = node.right\n while (t):\n self.list.append(t)\n t = t.left\n\n return node.val", "def next(self) -> int:\n self.pointer += 1\n return self.ordered_nodes[self.pointer-1]", "def get_next(node):\n return node['next']", "def set_next(node, value):\n node['next'] = value", "def getNext(self):", "def next(self, _event):\n self.set_val(self.val + 1)", "def get_next(current):\n return 0.5 * (current + n / current)", "def __next__(self) -> object:\n if not self.current_node:\n raise StopIteration\n\n current_node_value = self.current_node.value()\n self.current_node = self.current_node.next()\n return current_node_value", "def next(self) -> int:\n while (self.stack or self.node):\n if self.node:\n self.stack.append(self.node)\n self.node = self.node.left\n else:\n self.node = self.stack.pop()\n res = self.node.val\n self.node = self.node.right\n return res", "def new_id(self):\n self.next += 1\n return self.next", "def next(self) -> int:\n value = self.inorder[self.index]\n self.index = self.index + 1\n return value", "def get_next(node, offset):\n row, column = node\n row_offset, column_offset = offset\n return row + row_offset, column + column_offset", "def addNode(self, new_value): # Class O(n)\r\n if type(new_value) is not int: raise ValueError(\"Please, insert an integer\")\r\n h = self.head\r\n while 'next' in dir(h.next):\r\n h = h.next\r\n else:\r\n h.next = Node(new_value)", "def process(self, count):\n self.data.add_node(0)\n for index in range(1, count + 1):\n # print(\"{}.: {}\".format(index, self.data))\n self.data.move_circular(self.stepforward)\n self.data.add_node(index)\n return self.data.get_next()", "def next(self, initial):", "def get_next():\n return \"some_value\"", "def increment_node_index(self):\n self.node_index += 1", "def get_next(self) -> int:\n return self._current * self._step + self._offset", "def next(self, log=False):\n def get_next(i, l):\n for l, r in l:\n if l <= i <= r:\n return r+1\n elif l > i:\n break\n return i\n\n if log:\n r = self.next_li()\n else:\n r = self.next_cl()\n\n n = get_next(r.clidx, self.reg.get(self.liidx, []))\n self.clidx = n\n self.current = Record(self.liidx, self.clidx, None)\n return self.current", "def next(self) -> int:\n node = self.stack.pop()\n self.push_lefts(node.right)\n return node.val", "def get_next(self):\n return self.cur_node.next.data", "def _advance(self):\n self._current += self._increment # Accessing the superclass's field", "def next(self, delta=1):\n return Prufer.unrank(self.rank + delta, self.nodes)", "def next(self):\n temp = self.n\n try:\n self.n = next(self.g)\n except Exception as e:\n self._hasNext = False\n return temp", "def get_next_node_address(self):\n result = self.other_nodes[self.current_node]\n self.current_node = (self.current_node + 1) % self.other_nodes_len\n return result", "def __next__(self):\n\n # pointer is the current value\n # counter is an item next to pointer\n # take value from pointer position and reduce\n # counter until counter is not 0\n # if counter == 0 move 
pointer to the next position\n # with value (stride=2)\n if self.counter <= 0:\n # move pointer to the next item\n self.pointer += 2\n try:\n # take counter\n self.counter = self.data[self.pointer + 1]\n except IndexError:\n raise StopIteration\n\n # take value from pointer position and reduce counter\n value = self.data[self.pointer]\n self.counter -= 1\n\n return value" ]
[ "0.66704005", "0.63615674", "0.63538784", "0.6263111", "0.6259454", "0.625471", "0.62121814", "0.6187122", "0.615829", "0.61369663", "0.6088049", "0.60810864", "0.6007773", "0.5959815", "0.59519273", "0.5947382", "0.5939667", "0.58915627", "0.58673847", "0.5866701", "0.5839652", "0.583466", "0.5830155", "0.582938", "0.5807465", "0.5801369", "0.5796639", "0.5781664", "0.5766271", "0.5717102" ]
0.6884581
0
Return formatted text, properly escaped and with a link to the picture if not in titleMode
def formatOutput(self, storedText, titleMode, internal=False): if titleMode: return TextFormat.formatOutput(self, storedText, titleMode, internal) paths = storedText.split('\n') results = ['<img src="%s">' % escape(url, treedoc.escDict) for url in paths] return u'<br />'.join(results)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def image(self, src, title, text):\n src = escape_link(src)\n text = escape(text, quote=True)\n if title:\n title = escape(title, quote=True)\n html = '<img src=\"%s\" alt=\"%s\" title=\"%s\"' % (src, text, title)\n else:\n html = '<img src=\"%s\" alt=\"%s\"' % (src, text)\n if self.options.get('use_xhtml'):\n return '%s />' % html\n return '%s>' % html", "def outputText(self, item, titleMode, internal=False):\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n altText = ''\n if self.linkAltField:\n field = item.nodeFormat().findField(self.linkAltField)\n if field:\n altText = field.outputText(item, titleMode, internal)\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, altText, internal)\n return ''", "def formatOutput(self, storedText, titleMode, altText='', internal=False):\n if titleMode or not internal:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n # add prefix/suffix within the executable path:\n url = TextFormat.formatOutput(self, url, titleMode, internal)\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n results.append(u'<a href=\"%s\">%s</a>' %\n (escape(path, treedoc.escDict), altText or url))\n return u'<br />'.join(results)", "def formatOutput(self, storedText, titleMode, altText='', internal=False):\n if titleMode:\n return TextFormat.formatOutput(self, storedText, titleMode,\n internal)\n paths = storedText.split('\\n')\n results = []\n for url in paths:\n path = url\n if not URLFormat.hasMethodRe.match(path):\n path = u'%s%s' % (self.URLMethod, path)\n path = u'<a href=\"%s\">%s</a>' % (escape(path, treedoc.escDict),\n altText or url)\n results.append(TextFormat.formatOutput(self, path, titleMode,\n internal))\n return u'<br />'.join(results)", "def title(text, level=0):\n return '\\n' + text + '\\n' + '=-~_#%^' [level] * len(text) + '\\n\\n'", "def formatOutput(self, storedText, titleMode, internal=False):\n prefix = self.prefix\n suffix = self.suffix\n if titleMode:\n if self.html:\n storedText = self.removeMarkup(storedText)\n if globalref.docRef.formHtml:\n prefix = self.removeMarkup(prefix)\n suffix = self.removeMarkup(suffix)\n else:\n if not self.html:\n storedText = escape(storedText).replace('\\n', '<br />')\n if not globalref.docRef.formHtml:\n prefix = escape(prefix)\n suffix = escape(suffix)\n return u'%s%s%s' % (prefix, storedText, suffix)", "def get_text(downgrade_titles=False):", "def image(self, text):\n pattern = re.compile(r\"\"\"\n (?:[\\[{])? # pre\n \\! # opening !\n (\\<|\\=|\\>)? # optional alignment atts\n (%s) # optional style,class atts\n (?:\\. )? # optional dot-space\n ([^\\s(!]+) # presume this is the src\n \\s? # optional space\n (?:\\(([^\\)]+)\\))? # optional title\n \\! # closing\n (?::(\\S+))? 
# optional href\n (?:[\\]}]|(?=\\s|$)) # lookahead: space or end of string\n \"\"\" % self.c, re.U | re.X)\n return pattern.sub(self.fImage, text)", "def image(self, link, title, alt):\n if not link.startswith(('http://', 'https://')):\n source_dir = os.path.dirname(self.source_path)\n link = os.path.abspath(os.path.join(source_dir, link))\n return '<img src=\"%s\" title=\"%s\" alt=\"%s\" />' % (link, title, alt)", "def html_title(title):\n return '<center><h1>%s</h1></center>' % (title)", "def image_preview(self):\r\n h = '<img src=\"%s\" alt=\"%s\"/>' % (self.image_resized_url, self.title)\r\n return mark_safe(h)", "def get_title(text, uuid=None):\n if uuid is not None:\n text += get_provenance_link(uuid)\n title = pn.Row(pn.pane.HTML('<h2>{}</h2>'.format(text)), align='start')\n\n return title", "def addTitle(text):\n\treturn OnscreenText(text=text, style=1, fg=(1,1,1,1), \\\n\t\tpos=(1.3,-0.95), align=TextNode.ARight, scale = .07)", "def __str__(self):\n t = Template(\n \"\"\"\n <h4>$title</h4>\n $imgs\n $footnotes\n <hr/>\"\"\")\n # Return result.\n return t.substitute({\n \"title\": self.title,\n \"imgs\": self.render_images(),\n \"footnotes\": self.render_footnotes()\n })", "def get_title():", "def format_url(self, url, text):\r\n return u'<a href=\"%s\">%s</a>' % (escape(url), text)", "def linkified_description(self):\n links = []\n def linkify(matchobj, links=links):\n if '|' in matchobj.group(1):\n url = matchobj.group(1).split('|')\n link = format_html('<a href=\"{0}\" target=\"_blank\">{1}</a>', url[0], url[1])\n else:\n link = format_html('<a href=\"{0}\" target=\"_blank\">{1}</a>', self.url, matchobj.group(1))\n links.append(link)\n return '{%d}' % (len(links) - 1)\n\n fmt = re.sub(r'\\[\\[([^\\]]+)\\]\\]', linkify, self.description)\n return format_html(fmt, *links)", "def make_main_title(self, end, end_center=False):\n main_title = r\"\\begin{center}\"\n if self.detector is not None:\n main_title += \"%s \"%self.detector\n if self.selection is not None:\n main_title += \"%s Event Selection \"%self.selection\n main_title += end\n if end_center:\n main_title += r\"\\end{center}\"\n return main_title", "def markdown_item(title, url):\n print('* [{0}]({1})'.format(\n markdown_escape(title),\n markdown_escape(url),\n ))", "def show_title():\r\n complement = (\r\n '\\n __ ')\r\n title = ('\\n _______ _______________ ____ _______ __ ___ _ _______/ /_ ____ _____ ____ ____ ')\r\n title += ('\\n / ___/ / / / ___/ ___/ _ \\/ __ \\/ ___/ / / / / _ \\| |/_/ ___/ __ \\/ __ `/ __ \\/ __ `/ _ \\ ')\r\n title += ('\\n/ /__/ /_/ / / / / / __/ / / / /__/ /_/ / / __/> </ /__/ / / / /_/ / / / / /_/ / __/ ')\r\n title += ('\\n\\___/\\__,_/_/ /_/ \\___/_/ /_/\\___/\\__, / \\___/_/|_|\\___/_/ /_/\\__,_/_/ /_/\\__, /\\___/ ')\r\n title += ('\\n /____/ /____/ ')\r\n # Add Styles\r\n break_line = ('-' * len(complement) + \"\\n\") * 2\r\n print(\"{}\\n{}\\n{}\\n\".format(break_line, title, break_line))", "def _get_title_tag(self, item):\n tag = '<{heading}><a href=\"{href}\">{title}</a></{heading}>'\n if self._field_is_visible(\"title\"):\n tile_conf = self.get_tile_configuration()\n title_conf = tile_conf.get(\"title\", None)\n if title_conf:\n heading = title_conf.get(\"htmltag\", \"h2\")\n href = item.absolute_url()\n title = item.Title()\n return tag.format(heading=heading, href=href, title=title)", "def add_title(text):\n return OnscreenText(text=text, style=1, pos=(-0.1, 0.09), scale=.08,\n parent=base.a2dBottomRight, align=TextNode.ARight,\n fg=(1, 1, 1, 1), shadow=(0, 0, 0, 1))", "def 
complete_alt_title(self, obj):\n return str(obj)", "def outputText(self, item, titleMode, internal=False):\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''", "def prep_title(self):\n self.title_image = self.font.render(self.title, True, self.text_color,\n self.ctl_settings.panel_bg_color)\n self.title_image_rect = self.title_image.get_rect()\n self.title_image_rect.centerx = self.rect.centerx\n self.title_image_rect.bottom = self.rect.top - 1", "def helptext(self):\n return \"\"\"\n <b>A</b> to start the aperture or set the value<br/>\n <b>S</b> to select an existing aperture<br/>\n <b>C</b> to clear the selection<br/>\n <b>F</b> to find a peak close to the cursor<br/>\n <b>[</b> to edit the left edge of selected or closest<br/>\n <b>]</b> to edit the right edge of selected or closest<br/>\n <b>L</b> to edit the location of selected or closest<br/>\n <b>D</b> to delete the selected or closest aperture\n \"\"\"", "def editModeHeading(text):\n return u'<p style=\"editModeHeading\">%s</p>' % text", "def title_draw():\n nonlocal width\n widthTitle = len(self.str_title)\n if widthTitle > width:\n self.str_title = self.str_title[0:width-5] + '...'\n widthTitle = len(self.str_title)\n h_len = widthTitle + self.l_padding + self.r_padding\n top = ''.join(['┌'] + ['─' * h_len] + ['┐']) + '\\n'\n result = top + \\\n '│' + \\\n ' ' * self.l_padding + \\\n self.str_title + \\\n ' ' * self.r_padding + \\\n '│' + self.str_shadow + '\\n'\n offset = 2 + self.l_padding + len(self.str_title) + self.r_padding\n return result, offset", "def _get_title_text(self):\n return Text(\n self,\n self.settings.font_bold_filename,\n 96,\n self.settings.font_color,\n 'zuckbot',\n {'center': self.screen_rect.center},\n 0,\n -50,\n )", "def convert_text_to_rouge_format(text, title=\"dummy title\"):\n sentences = text.split(\"\\n\")\n sent_elems = [\n \"<a name=\\\"{i}\\\">[{i}]</a> <a href=\\\"#{i}\\\" id={i}>\"\n \"{text}</a>\".format(i=i, text=sent)\n for i, sent in enumerate(sentences, start=1) if sent != '']\n html = \"\"\"<html>\n<head>\n<title>{title}</title>\n</head>\n<body bgcolor=\"white\">\n{elems}\n</body>\n</html>\"\"\".format(title=title, elems=\"\\n\".join(sent_elems))\n\n return html" ]
[ "0.625808", "0.6189183", "0.59681433", "0.59475166", "0.5911793", "0.58762133", "0.5813121", "0.5810856", "0.5802654", "0.58000255", "0.5753115", "0.57340395", "0.572571", "0.5709024", "0.5676528", "0.567329", "0.5668774", "0.5666511", "0.56526905", "0.56396496", "0.56106", "0.55729854", "0.5569004", "0.5564576", "0.55612147", "0.55445063", "0.5542113", "0.5535897", "0.5534921", "0.55348563" ]
0.68930674
0
Interpolates between two vectors that are nonzero and don't both lie on a line going through origin. First normalizes v2 to have the same norm as v1. Then interpolates between the two vectors on the hypersphere.
def interpolate_hypersphere(v1, v2, num_steps): v1_norm = tf.norm(v1) v2_norm = tf.norm(v2) v2_normalized = v2 * (v1_norm / v2_norm) vectors = [] for step in range(num_steps): interpolated = v1 + (v2_normalized - v1) * step / (num_steps - 1) interpolated_norm = tf.norm(interpolated) interpolated_normalized = interpolated * (v1_norm / interpolated_norm) vectors.append(interpolated_normalized) return tf.stack(vectors)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def intersectionOfTwoLines(p1, v1, p2, v2):\n # if we transform multiple points in one go\n if len(v1.shape) == 2:\n a1 = np.einsum('ij,ij->i', v1, v1)\n a2 = np.einsum('ij,ij->i', v1, v2)\n b1 = -np.einsum('ij,ij->i', v2, v1)\n b2 = -np.einsum('ij,ij->i', v2, v2)\n c1 = -np.einsum('ij,j->i', v1, p1 - p2)\n c2 = -np.einsum('ij,j->i', v2, p1 - p2)\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]).transpose(2, 0, 1), np.array([c1, c2]).T)\n res = res[:, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + res[..., 1] * v2], axis=0)\n else: # or just one point\n a1 = np.dot(v1, v1)\n a2 = np.dot(v1, v2)\n b1 = -np.dot(v2, v1)\n b2 = -np.dot(v2, v2)\n c1 = -np.dot(v1, p1 - p2)\n c2 = -np.dot(v2, p1 - p2)\n try:\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]), np.array([c1, c2]))\n except np.linalg.LinAlgError:\n return np.ones(3)*np.nan\n res = res[None, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + res[..., 1] * v2], axis=0)[0]", "def intersect_2_lines(P1, V1, P2, V2):\n Vx = np.cross(V1, V2)\n s = np.dot(np.cross(P2 - P1, V1), Vx)/np.dot(Vx, Vx)\n return s", "def distanceOfTwoLines(p1, v1, p2, v2):\n # if we transform multiple points in one go\n if len(v1.shape) == 2:\n a1 = np.einsum('ij,ij->i', v1, v1)\n a2 = np.einsum('ij,ij->i', v1, v2)\n b1 = -np.einsum('ij,ij->i', v2, v1)\n b2 = -np.einsum('ij,ij->i', v2, v2)\n c1 = -np.einsum('ij,j->i', v1, p1 - p2)\n c2 = -np.einsum('ij,j->i', v2, p1 - p2)\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]).transpose(2, 0, 1), np.array([c1, c2]).T)\n res = res[:, None, :]\n return np.linalg.norm((p1 + res[..., 0] * v1) - (p2 + res[..., 1] * v2), axis=1)\n else: # or just one point\n a1 = np.dot(v1, v1)\n a2 = np.dot(v1, v2)\n b1 = -np.dot(v2, v1)\n b2 = -np.dot(v2, v2)\n c1 = -np.dot(v1, p1 - p2)\n c2 = -np.dot(v2, p1 - p2)\n try:\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]), np.array([c1, c2]))\n except np.linalg.LinAlgError:\n return 0\n res = res[None, None, :]\n return np.linalg.norm((p1 + res[..., 0] * v1) - (p2 + res[..., 1] * v2), axis=1)[0]", "def intersection(v1, v2):\n x = v1[0:2] + v2[0:2]\n y = v1[2:4] + v2[2:4]\n if( x[3] == 0 ): #To avoid a divide by zero, if x[3] is 0 then we just solve for where lineA equals x[2]\n t1 = (x[2] - x[0])/\\\n (x[1])\n return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]\n\n else: \n t1 = ( y[0] - y[2] + (y[3]/x[3])*(x[2] - x[0]) )/\\\n ( (y[3]*x[1])/x[3] - y[1] )\n return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]", "def make_q(v0, v2):\n return (v0.y - v2.y)/(v0.x - v2.x)", "def linear_interpolate(x, x0, y0, x1, y1):\n try:\n return (y0 * (x1 - x) + y1 * (x - x0)) / (x1 - x0)\n except ZeroDivisionError:\n return 0.0", "def __init__(self,v0,v1):\n self.vinputs = v0,v1\n self.xhi = max([v0[0],v1[0]])\n self.yhi,self.ylo = v0[1]>v1[1] and (v0[1],v1[1],) or (v1[1],v0[1])\n\n self.m = (v1[0]-v0[0]) / (v1[1]-v0[1]) ### (x1-x0)/(y1-y0)\n self.b = v0[0] - (v0[1] * self.m) ### x0 - y0*(x1-x0)/(y1-y0)", "def vincenty(lat1, lon1, lat2, lon2,\n r_major=6378.1370, r_minor=6356.752314, r_sphere=None):\n lat1 = m.radians(lat1)\n lat2 = m.radians(lat2)\n lon1 = m.radians(lon1)\n lon2 = m.radians(lon2)\n \n if (r_sphere is not None):\n r_major = r_sphere\n r_minor = r_sphere\n f = 0.0\n else:\n f = (r_major-r_minor)/r_major\n \n U1 = m.atan((1.0-f) * m.tan(lat1))\n U2 = m.atan((1.0-f) * m.tan(lat2))\n L = lon2 - lon1\n \n epsilon = 1E-12 # Accuracy (10E-12 -> ~ 0.06mm)\n max_iter = 500\n lam = L\n \n cU1 = m.cos(U1)\n cU2 = m.cos(U2)\n sU1 = m.sin(U1)\n sU2 = m.sin(U2)\n \n for i in 
range(max_iter):\n lam_old = lam\n sLam = m.sin(lam)\n cLam = m.cos(lam)\n sin_sig = m.sqrt((cU2*sLam)**2 + (cU1*sU2 - sU1*cU2*cLam)**2)\n cos_sig = sU1*sU2 + cU1*cU2*cLam\n sig = m.atan2(sin_sig,cos_sig)\n sin_alp = (cU1*cU2*sLam) / sin_sig\n cos2_alp = 1.0 - sin_alp**2\n if (cos2_alp == 0.0):\n # equitorial line\n cos_2sigm = 100\n C = 0.0\n else:\n cos_2sigm = cos_sig - (2.0*sU1*sU2)/cos2_alp\n C = f/16.0 * cos2_alp * (4.0 + f*(4.0-3.0*cos2_alp))\n lam = L + (1.0 - C) * f * sin_alp * \\\n (sig + C * sin_sig * (cos_2sigm + C * cos_sig * \\\n (-1.0 + 2.0 * cos_2sigm**2)))\n if ((m.fabs(lam - lam_old)) <= epsilon):\n # Found a solution in i iters...\n break\n elif (i == max_iter):\n # Catch the out of iters case, never seen this.\n raise Exception(\"Failed to solve for distance\")\n \n usq = cos2_alp * ((r_major**2 - r_minor**2) / r_minor**2)\n A = 1 + usq/16384 * (4096 + usq*(-768 + usq*(320 - 175*usq)))\n B = usq/1024 * (256 + usq*(-128 + usq*(74 - 47*usq)))\n del_sig = B * sin_sig * (cos_2sigm + 0.25*B*(cos_sig*( \\\n -1 + 2*cos_2sigm**2) - (1.0/6.0)*B*cos_2sigm * ( \\\n -3 + 4*sin_sig**2) * (-3 + 4 * cos_2sigm**2)))\n s = r_minor * A * (sig - del_sig)\n alp1 = m.atan2(cU2*m.sin(lam),(cU1*sU2-sU1*cU2*m.cos(lam)))\n alp2 = m.atan2(cU1*m.sin(lam),(cU1*sU2*m.cos(lam)-sU1*cU2))\n\n return (s, m.degrees(alp1), m.degrees(alp2))", "def nor_vector(p1: Vec2, p2: Vec2) -> Vec2:\n return Vec2(p1.y - p2.y, p2.x - p1.x)", "def midpoint_line(a, b):\n return scale_vector(add_vectors(a, b), 0.5)", "def _LinearInterpolate(x0, target, x1, y0, y1):\n if x0 == x1:\n return (y0 + y1) / 2\n return (y1 - y0) * (target - x0) / (x1 - x0) + y0", "def planeLineIntersect(p1, p2, equ):\n n = vector(equ[0], equ[1], equ[2])\n v1, v2 = vector(p1), vector(p2)\n t = (equ[3] - n.dotProduct(v2)) / (n.dotProduct(v1 - v2))\n return (t * v1 + (1 - t) * v2).coords()", "def test_from_two_vectors(self):\r\n for _ in range(20):\r\n v0 = np.random.randn(3)\r\n v1 = np.random.randn(3)\r\n v0 /= np.linalg.norm(v0)\r\n v1 /= np.linalg.norm(v1)\r\n\r\n q = from_two_vectors(v0, v1)\r\n R = to_rotation(q)\r\n\r\n zero_vec = R @ v0 - v1\r\n self.assertAlmostEqual(np.linalg.norm(zero_vec), 0.0)\r\n\r\n q_inv = from_two_vectors(v1, v0)\r\n R_inv = to_rotation(q_inv)\r\n zero_matrix = R @ R_inv - np.identity(3)\r\n self.assertAlmostEqual(np.linalg.norm(zero_matrix), 0.0)", "def projection(v1, v2):\n v1 = v1.astype(np.float64)\n\n if np.any(v1):\n result = (np.dot(v1, v2) / np.dot(v1, v1)) * v1\n else:\n result = np.zeros(3)\n\n return result", "def cross(v1: Vec2, v2: Vec2) -> float:\n return v1.x * v2.x + v1.y * v2.y", "def intersection(line1, line2):\n p0, p1, p2, p3 = map(\n lambda tup : np.array(tup[:2]),\n [line1[0], line1[1], line2[0], line2[1]]\n )\n p1, p2, p3 = map(lambda x : x - p0, [p1, p2, p3])\n transform = np.zeros((2, 2))\n transform[:,0], transform[:,1] = p1, p2\n if np.linalg.det(transform) == 0: return\n inv = np.linalg.inv(transform)\n new_p3 = np.dot(inv, p3.reshape((2, 1)))\n #Where does line connecting (0, 1) to new_p3 hit x axis\n x_intercept = new_p3[0] / (1 - new_p3[1]) \n result = np.dot(transform, [[x_intercept], [0]])\n result = result.reshape((2,)) + p0\n return result", "def two_norm(v):\n return math.sqrt(dot_product(v, v))", "def mirror_vector_vector(v1, v2):\n return subtract_vectors(v1, scale_vector(v2, 2 * dot_vectors(v1, v2)))", "def sweptAABB(b1, b2):\n\tnormalX = 0;\n\tnormalY = 0;\n\n\t\"\"\"\n\tfloat xInvEntry, yInvEntry;\n\tfloat xInvExit, yInvExit;\n\n\t// find the distance between the objects on 
the near and far sides for both x and y\n\tif (b1.vx > 0.0f) {\n\t\txInvEntry = b2.x - (b1.x + b1.w);\n\t\txInvExit = (b2.x + b2.w) - b1.x;\n\t} else {\n\t\txInvEntry = (b2.x + b2.w) - b1.x;\n\t\txInvExit = b2.x - (b1.x + b1.w);\n\t}\n\n\tif (b1.vy > 0.0f) {\n\t\tyInvEntry = b2.y - (b1.y + b1.h);\n\t\tyInvExit = (b2.y + b2.h) - b1.y;\n\t} else {\n\t\tyInvEntry = (b2.y + b2.h) - b1.y;\n\t\tyInvExit = b2.y - (b1.y + b1.h);\n\t}\n\t\"\"\"\n\n\txInvEntry = 0;\n\tyInvEntry = 0;\n\txInvExit = 0;\n\tyInvExit = 0;\n\n\tif (b1.vx > 0):\n\t\txInvEntry = b2.x - (b1.x + b1.w);\n\t\txInvExit = (b2.x + b2.w) - b1.x;\n\telse:\n\t\txInvEntry = (b2.x + b2.w) - b1.x;\n\t\txInvExit = b2.x - (b1.x + b1.w);\n\t\n\n\tif (b1.vy > 0):\n\t\tyInvEntry = b2.y - (b1.y + b1.h);\n\t\tyInvExit = (b2.y + b2.h) - b1.y;\n\telse:\n\t\tyInvEntry = (b2.y + b2.h) - b1.y;\n\t\tyInvExit = b2.y - (b1.y + b1.h);\n\n\t\"\"\"\n\t// find time of collision and time of leaving for each axis (if statement is to prevent divide by zero)\n\tfloat xEntry, yEntry;\n\tfloat xExit, yExit;\n\n\tif (b1.vx == 0.0f) {\n\t\txEntry = -std::numeric_limits<float>::infinity();\n\t\txExit = std::numeric_limits<float>::infinity();\n\t} else {\n\t\txEntry = xInvEntry / b1.vx;\n\t\txExit = xInvExit / b1.vx;\n\t}\n\n\tif (b1.vy == 0.0f) {\n\t\tyEntry = -std::numeric_limits<float>::infinity();\n\t\tyExit = std::numeric_limits<float>::infinity();\n\t} else {\n\t\tyEntry = yInvEntry / b1.vy;\n\t\tyExit = yInvExit / b1.vy;\n\t}\n\t\"\"\"\n\txEntry = 0;\n\tyEntry = 0;\n\txExit = 0;\n\tyExit = 0;\n\n\tif (b1.vx == 0):\n\t\txEntry = -float(\"inf\");\n\t\txExit = float(\"inf\");\n\telse:\n\t\txEntry = xInvEntry / b1.vx;\n\t\txExit = xInvExit / b1.vx;\n\n\tif (b1.vy == 0):\n\t\tyEntry = -float(\"inf\");\n\t\tyExit = float(\"inf\");\n\telse:\n\t\tyEntry = yInvEntry / b1.vy;\n\t\tyExit = yInvExit / b1.vy;\n\t\n\tif (yEntry > 1):\n\t\tyEntry = -float(\"inf\");\n\tif (xEntry > 1):\n\t\txEntry = -float(\"inf\");\n\n\n\t\"\"\"\n\t// find the earliest/latest times of collision\n\tfloat entryTime = std::max(xEntry, yEntry);\n\tfloat exitTime = std::min(xExit, yExit);\n\t\"\"\"\n\n\tentryTime = max(xEntry, yEntry);\n\texitTime = min(xExit, yExit);\n\n\t\"\"\"\n\t// if there was no collision\n\tif (entryTime > exitTime || xEntry < 0.0f && yEntry < 0.0f || xEntry > 1.0f || yEntry > 1.0f) {\n\t\tnormalX = 0.0f;\n\t\tnormalY = 0.0f;\n\t\treturn 1.0f;\n\t} else {\n\t\t// if there was a collision\n\t\t// calculate normal of collided surface\n\t\tif (xEntry > yEntry) {\n\t\t\tif (xInvEntry < 0.0f) {\n\t\t\t\tnormalX = 1.0f;\n\t\t\t\tnormalY = 0.0f;\n\t\t\t} else {\n\t\t\t\tnormalX = -1.0f;\n\t\t\t\tnormalY = 0.0f;\n\t\t\t}\n\t\t} else {\n\t\t\tif (yInvEntry < 0.0f) {\n\t\t\t\tnormalX = 0.0f;\n\t\t\t\tnormalY = 1.0f;\n\t\t\t} else {\n\t\t\t\tnormalX = 0.0f;\n\t\t\t\tnormalY = -1.0f;\n\t\t\t}\n\t\t}\n\n\t\t// return the time of collision\n\t\treturn entryTime;\n\t}\n\t\"\"\"\n\n\t\"\"\"\n\tif (entryTime > exitTime)\n\t\treturn 1.0f; // This check was correct.\n\tif (entryX < 0.0f && entryY < 0.0f)\n\t\treturn 1.0f;\n\tif (entryX < 0.0f) {\n\t\t// Check that the bounding box started overlapped or not.\n\t\tif (s.max.x < t.min.x || s.min.x > t.max.x)\n\t\t\treturn 1.0f;\n\t}\n\tif (entryY < 0.0f) {\n\t\t// Check that the bounding box started overlapped or not.\n\t\tif (s.max.y < t.min.y || s.min.y > t.max.y)\n\t\t\treturn 1.0f;\n\t}\"\"\"\n\n\n\t# if (entryTime > exitTime or xEntry < 0 and yEntry < 0 or xEntry > 1 or yEntry > 1):\n\t# \tnormalX = 0;\n\t# \tnormalY = 0;\n\t# \treturn 
(1, (normalX, normalY));\n\t# else:\n\n\tnormalX = 0;\n\tnormalY = 0;\n\n\tif (entryTime > exitTime):\n\t\treturn (1, (0, 0));\n\t\n\tif (xEntry < 0 and yEntry < 0):\n\t\treturn (1, (0, 0));\n\n\tif (xEntry < 0):\n\t\tif (b1.x + b1.w < b2.x or b1.x > b2.x + b2.w):\n\t\t\treturn (1, (0, 0));\n\n\tif (yEntry < 0):\n\t\tif (b1.y + b1.h < b2.y or b1.y > b2.y + b2.h):\n\t\t\treturn (1, (0, 0));\n\n\tif (xEntry > yEntry):\n\t\tif (xInvEntry < 0):\n\t\t\tnormalX = 1;\n\t\t\tnormalY = 0;\n\t\telse:\n\t\t\tnormalX = -1;\n\t\t\tnormalY = 0;\n\telse:\n\t\tif (yInvEntry < 0):\n\t\t\tnormalX = 0;\n\t\t\tnormalY = 1;\n\t\telse:\n\t\t\tnormalX = 0;\n\t\t\tnormalY = -1;\n\n\treturn (entryTime, (normalX, normalY));", "def normalize_vector(v1):\n #this gets the vector length\n vector_length = get_vector_norm(v1)\n \n #divides each coordinate of the vector by its norm\n for key in v1:\n v1[key] = v1[key]/ vector_length", "def InterpolateSurfaceVectorsWithLine():\r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, PointData, PolygonData = CreateMatrixVTK(VTKString)\r\n Centroids1,Vectors1 = ElementNormal(PointData,PolygonData)\r\n # Load full volume centroid\r\n NCF_Str = OpenData(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles\",\"new_centroids_file.dat\")\r\n HeaderNCF,Centroids2 = CreateMatrixDat(NCF_Str)\r\n print('Loading Finished \\n Inserting Centre Line...')\r\n # Create Plane of vectors through centreline.\r\n PlaneCentroids,PlaneVectors = InsertCentreLine(Centroids1,Vectors1,50)\r\n print('Centre Line Inserted \\n Interpolating Centroids...')\r\n # Interpolate Vectors\r\n Vectors2 = VectorInter(PlaneCentroids,PlaneVectors,Centroids2)\r\n # Make the data more sparse to display better.\r\n C1,V1 = SparseData(PlaneCentroids,PlaneVectors,0.1)\r\n C2,V2 = SparseData(Centroids2,Vectors2,0.1)\r\n print('Interpolation Finished \\n Plotting...')\r\n # Plot Data\r\n fig = plt.figure()\r\n\r\n ax1 = fig.add_subplot(121,projection = '3d')\r\n DisplaySliceVectors(C1,V1,ax1,5,10)\r\n\r\n ax2 = fig.add_subplot(122,projection = '3d')\r\n DisplaySliceVectors(C2,V2,ax2,5,10)\r\n\r\n plt.show()\r\n\r\n header = 'TITLE = \\\"Normal Surface Vectors With Central axis Line\\\"\\nVARIABLES = \\\"XV\\\", \\\"YV\\\", \\\"ZV\\\" \\nZONE T=\\\"Step 0 Incr 0\\\" \\nF = VECTORS'\r\n\r\n np.savetxt(\"C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/OutputFiles/SurfaceLineVectorInterpolation.dat\",Vectors2,header = header,comments='')", "def dist_vincenty(lat1, lon1, lat2, lon2, iterations=20):\r\n if lat1 < -90 or lat1 > 90 or lat2 < -90 or lat2 > 90 or lon1 < -180 or lon1 > 180 or lon2 < -180 or lon2 > 180:\r\n raise ValueError(\r\n \"Latitude values shoulds range from (-90,90) and longitude from (-180,180) but one of the input values is out of bounds. 
Latitude_1: %f, Logitude_1: %f, Latitude_2: %f, Logitude_2: %f\" %\r\n (lat1, lon1, lat2, lon2))\r\n\r\n major, minor, f = 6378137, 6356752.314245, 1 / 298.257223563\r\n\r\n lat1, lng1, lat2, lng2 = radians(\r\n lat1), radians(lon1), radians(lat2), radians(lon2)\r\n delta_lng = lng2 - lng1\r\n reduced_lat1, reduced_lat2 = atan(\r\n (1 - f) * tan(lat1)), atan((1 - f) * tan(lat2))\r\n\r\n sin_reduced1, cos_reduced1 = sin(reduced_lat1), cos(reduced_lat1)\r\n sin_reduced2, cos_reduced2 = sin(reduced_lat2), cos(reduced_lat2)\r\n\r\n lambda_lng = delta_lng\r\n lambda_prime = 2 * pi\r\n while abs(lambda_lng - lambda_prime) > 10e-12 and iterations > 0:\r\n sin_lambda_lng, cos_lambda_lng = sin(lambda_lng), cos(lambda_lng)\r\n\r\n sin_sigma = sqrt(\r\n (cos_reduced2 * sin_lambda_lng) ** 2 +\r\n (cos_reduced1 * sin_reduced2 -\r\n sin_reduced1 * cos_reduced2 * cos_lambda_lng) ** 2\r\n )\r\n if sin_sigma == 0:\r\n return 0 # Coincident points\r\n\r\n cos_sigma = (\r\n sin_reduced1 * sin_reduced2 +\r\n cos_reduced1 * cos_reduced2 * cos_lambda_lng\r\n )\r\n sigma = atan2(sin_sigma, cos_sigma)\r\n\r\n sin_alpha = (cos_reduced1 * cos_reduced2 * sin_lambda_lng / sin_sigma)\r\n cos_sq_alpha = 1 - sin_alpha ** 2\r\n\r\n if cos_sq_alpha != 0:\r\n cos2_sigma_m = cos_sigma - 2 * \\\r\n (sin_reduced1 * sin_reduced2 / cos_sq_alpha)\r\n else:\r\n cos2_sigma_m = 0.0 # Equatorial line\r\n\r\n C = f / 16. * cos_sq_alpha * (4 + f * (4 - 3 * cos_sq_alpha))\r\n\r\n lambda_prime = lambda_lng\r\n lambda_lng = (\r\n delta_lng + (1 - C) * f * sin_alpha * (\r\n sigma + C * sin_sigma * (\r\n cos2_sigma_m + C * cos_sigma * (-1 + 2 * cos2_sigma_m ** 2)\r\n )\r\n )\r\n )\r\n iterations -= 1\r\n\r\n if iterations == 0:\r\n raise ValueError(\"Vincenty formula failed to converge!\")\r\n\r\n u_sq = cos_sq_alpha * (major ** 2 - minor ** 2) / minor ** 2\r\n A = 1 + u_sq / 16384. * (4096 + u_sq * (-768 + u_sq * (320 - 175 * u_sq)))\r\n B = u_sq / 1024. * (256 + u_sq * (-128 + u_sq * (74 - 47 * u_sq)))\r\n delta_sigma = B * sin_sigma * (\r\n cos2_sigma_m + B / 4. * (cos_sigma * (-1 + 2 * cos2_sigma_m ** 2) -\r\n B / 6. 
* cos2_sigma_m * (-3 + 4 * sin_sigma ** 2) *\r\n (-3 + 4 * cos2_sigma_m ** 2))\r\n )\r\n s = minor * A * (sigma - delta_sigma)\r\n\r\n return round(s, 3) # round to 1mm precision\r", "def line_sphere_intersection(p1, p2, c, r):\n\t# FILL in your code here\n\n\tline_vector=np.subtract(p2,p1) #np.array([p2[0]-p1[0], p2[1]-p1[1], p2[2]-p1[2] ])\n\tval=np.sqrt(np.sum([(p2 - p1)**2\n\t\t\t\t\t\t for p1, p2 in zip(p1,p2)]))\n\n\tif val==0:\n\t\tunit_vector=np.array([0,0,0])\n\telse:\n\t\tunit_vector=[linevec/val for linevec in line_vector]\n\tvecO_C=np.subtract(p1,c)\n\t\t\n\tres=np.dot(unit_vector,vecO_C)* np.dot(unit_vector,vecO_C) - ( np.dot(vecO_C, vecO_C) - r*r )\n\treturn res", "def linear_triangulation(p1, p2, m1, m2):\n num_points = p1.shape[1]\n res = np.ones((4, num_points))\n\n for i in range(num_points):\n A = np.asarray([\n (p1[0, i] * m1[2, :] - m1[0, :]),\n (p1[1, i] * m1[2, :] - m1[1, :]),\n (p2[0, i] * m2[2, :] - m2[0, :]),\n (p2[1, i] * m2[2, :] - m2[1, :])\n ])\n\n _, _, V = np.linalg.svd(A)\n X = V[-1, :4]\n res[:, i] = X / X[3]\n\n return res", "def is_linearly_independent_2x2(u, v):\n uv = get_uv(u, v)\n if uv[0][0] * uv[1][1] - uv[1][0] * uv[0][1] != 0:\n return True\n else:\n return False", "def solve(self, v1, v0, normalize=False):\n if normalize:\n dv = -(v1 - v0)/v0\n else:\n dv = v1 - v0\n return -np.dot(self.H, dv)", "def distance(v1: Union[np.ndarray, np.iterable, int, float], v2: Union[np.ndarray, np.iterable, int, float],\n normalised=True) -> float:\n return 1 - np.dot(v1, v2) / (np.linalg.norm(v1) * np.linalg.norm(v2)) if not normalised else 1 - np.dot(v1, v2)", "def interpolV(y, x, newX):\r\n \r\n num = len(x)\r\n #if (num != len(y)):\r\n #//System.out.println(\"Toolbox.interpolV(): Old x and y must be same length\"); \r\n \r\n newNum = len(newX)\r\n #//System.out.println(\"interpolV: newNum \" + newNum + \" num \" + num); \r\n #newY = [0.0 for i in range(newNum)]\r\n\r\n#//Renormalize ordinates:\r\n \r\n iMinAndMax = minMax(y)\r\n norm = y[iMinAndMax[1]]\r\n #//System.out.println(\"norm \" + norm);\r\n #yNorm = [0.0 for i in range(num)]\r\n newYNorm = [0.0 for i in range(newNum)] \r\n #for i in range(num):\r\n # yNorm[i] = y[i] / norm \r\n yNorm = [ x / norm for x in y ]\r\n\r\n#// Set any newX elements that are *less than* the first x element to th first \r\n#// x element - \"0th order extrapolation\"\r\n#//\r\n start = 0\r\n for i in range(newNum):\r\n if (newX[i] <= x[1]):\r\n newYNorm[i] = yNorm[0]\r\n start += 1\r\n \r\n if (newX[i] > x[1]):\r\n break\r\n \r\n \r\n#//System.out.println(\"start \" + start);\r\n#//System.out.println(\"x[0] \" + x[0] + \" x[1] \" + x[1] + \" newX[start] \" + newX[start]);\r\n#double jWght, jm1Wght, denom;\r\n\r\n\r\n if (start < newNum-1):\r\n\r\n j = 1 #//initialize old abscissae index\r\n #//outer loop over new abscissae\r\n for i in range(start, newNum):\r\n\r\n #//System.out.println(\"i \" + i + \" j \" + j);\r\n\r\n#// break out if current element newX is *greater* that last x element\r\n if ( (newX[i] > x[num-1]) or (j > (num-1)) ):\r\n break \r\n \r\n\r\n while (x[j] < newX[i]): \r\n j += 1\r\n \r\n #//System.out.println(\"i \" + i + \" newX[i] \" + newX[i] + \" j \" + j + \" x[j-1] \" + x[j-1] + \" x[j] \" + x[j]);\r\n #//1st order Lagrange method:\r\n jWght = newX[i] * (1.0 - (x[j-1]/newX[i])) #//(newX[i]-x[j-1])\r\n jm1Wght = x[j] * (1.0 - (newX[i]/x[j])) #//(x[j]-newX[i])\r\n denom = x[j] * (1.0 - (x[j-1]/x[j])) #//(x[j]-x[j-1])\r\n jWght = jWght / denom\r\n jm1Wght = jm1Wght / denom\r\n #//newYNorm[i] = 
(yNorm[j]*(newX[i]-x[j-1])) + (yNorm[j-1]*(x[j]-newX[i]));\r\n newYNorm[i] = (yNorm[j]*jWght) + (yNorm[j-1]*jm1Wght)\r\n #//System.out.println(\"i \" + i + \" newYNorm[i] \" + newYNorm[i] + \" j \" + j + \" yNorm[j-1] \" + yNorm[j-1] + \" yNorm[j] \" + yNorm[j]);\r\n \r\n\r\n#// Set any newX elements that are *greater than* the first x element to the last \r\n#// x element - \"0th order extrapolation\"\r\n#//\r\n for i in range(newNum):\r\n if (newX[i] >= x[num-1]):\r\n newYNorm[i] = yNorm[num-1]\r\n \r\n \r\n\r\n #//Restore orinate scale\r\n #for i in range(newNum):\r\n # newY[i] = newYNorm[i] * norm \r\n newY = [ x * norm for x in newYNorm ]\r\n\r\n\r\n return newY", "def dist_2D(v1, v2):\n return ((v1[0]-v2[0])**2 + (v1[1]-v2[1])**2 )**(0.5)", "def calculate_velocity_induced_by_line_vortices(\n points, origins, terminations, strengths, collapse=True\n):\n\n # Expand the dimensionality of the points input. It is now of shape (N x 1 x 3). This will allow numpy to\n # broadcast the upcoming subtractions.\n points = np.expand_dims(points, axis=1)\n\n # Define the vectors from the vortex to the points. r_1 and r_2 now both are of shape (N x M x 3). Each row/column\n # pair holds the vector associated with each point/vortex pair.\n r_1 = points - origins\n r_2 = points - terminations\n\n # Define the vector from the vortex origins to the vortex terminations. This is of shape (N x M x 3).\n r_0 = r_1 - r_2\n\n # Calculate the vector cross product. This is of shape (N x M x 3).\n r_1_cross_r_2 = np.cross(r_1, r_2)\n\n # Calculate the cross product's absolute magnitude. This is of shape (N x M).\n r_1_cross_r_2_absolute_magnitude = (\n r_1_cross_r_2[:, :, 0] ** 2\n + r_1_cross_r_2[:, :, 1] ** 2\n + r_1_cross_r_2[:, :, 2] ** 2\n )\n\n # Calculate the vector lengths. These are of shape (N x M).\n r_1_length = np.linalg.norm(r_1, axis=-1)\n r_2_length = np.linalg.norm(r_2, axis=-1)\n\n # Define the radius of the line vortices. This is used to get rid of any singularities.\n radius = 3.0e-16\n\n # Set the lengths and the absolute magnitudes to zero, at the places where the lengths and absolute magnitudes are\n # less than the vortex radius. This insures that the calculation for the constant k will produce np.inf or np.nan\n # values at the locations where there are singularities.\n r_1_length[r_1_length < radius] = 0\n r_2_length[r_2_length < radius] = 0\n r_1_cross_r_2_absolute_magnitude[r_1_cross_r_2_absolute_magnitude < radius] = 0\n\n # Calculate the vector dot products. This uses numpy's einsum function for speed.\n r_0_dot_r_1 = np.einsum(\"ijk,ijk->ij\", r_0, r_1)\n r_0_dot_r_2 = np.einsum(\"ijk,ijk->ij\", r_0, r_2)\n\n # Calculate k and then the induced velocity, ignoring any divide-by-zero or nan errors. k is of shape (N x M)\n with np.errstate(divide=\"ignore\", invalid=\"ignore\"):\n k = (\n strengths\n / (4 * np.pi * r_1_cross_r_2_absolute_magnitude)\n * (r_0_dot_r_1 / r_1_length - r_0_dot_r_2 / r_2_length)\n )\n\n # Set the shape of k to be (N x M x 1) to support numpy broadcasting in the subsequent multiplication.\n k = np.expand_dims(k, axis=2)\n\n # Multiple k by the cross products of r_1 and r_2 to get the non-collapsed matrix of induced velocities. 
This is\n # of shape (M x N x 3).\n induced_velocities = k * r_1_cross_r_2\n\n # Set the values of the induced velocity to zero where there are singularities.\n induced_velocities[np.isinf(induced_velocities)] = 0\n induced_velocities[np.isnan(induced_velocities)] = 0\n\n if collapse:\n induced_velocities = np.sum(induced_velocities, axis=1)\n\n return induced_velocities" ]
[ "0.6572107", "0.6260578", "0.60963017", "0.60499376", "0.59216046", "0.588906", "0.584281", "0.58285564", "0.5783416", "0.5775993", "0.57329005", "0.5730922", "0.5719066", "0.57099354", "0.56855845", "0.56546366", "0.565153", "0.56246614", "0.5574889", "0.5566318", "0.5547199", "0.55358326", "0.5532392", "0.5508854", "0.5501112", "0.54998463", "0.54860836", "0.5475693", "0.5462909", "0.5454843" ]
0.6959037
0
Given a set of images, show an animation.
def animate(images): images = np.array(images) converted_images = np.clip(images * 255, 0, 255).astype(np.uint8) imageio.mimsave('./animation.gif', converted_images) return embed.embed_file('./animation.gif')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def show_images(images):\n for name, img in images:\n cv2.imshow(name, img)\n\n cv2.waitKey(0)", "def display_images(filenames):\n for filename in filenames:\n display(Image(filename))", "def display_frames_as_gif(frames):\n fig=e.cube.show_layout(frames[0]) \n print(\"Drawn\")\n def animate(i):\n return e.cube.update_plot(frames[i])\n anim = animation.FuncAnimation(fig, animate, frames = len(frames), interval=50,blit=True)", "def animate(frames):\n plt.grid('on')\n ax = plt.gca()\n ax.set_xticks(np.arange(0.5, 10, 1))\n ax.set_yticks(np.arange(0.5, 10, 1))\n ax.set_xticklabels([])\n ax.set_yticklabels([])\n\n for i in range(len(env_list)):\n ax.imshow(env_list[i],cmap='binary')\n plt.pause(0.05)", "def show(images, concat=True, return_plots=False):\r\n if concat:\r\n images = np.concatenate([img_to_rgb(img) for img in images], axis=1)\r\n return show([images], concat=False, return_plots=return_plots)\r\n else:\r\n plots = []\r\n for img in images:\r\n fig = plt.figure(figsize=(15, 7))\r\n plots.append(fig)\r\n plt.imshow((img * 255).astype(np.uint8))\r\n plt.show()\r\n if return_plots:\r\n return plots", "def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()\n # plt.savefig('./drive/My Drive/Colab Notebooks/TACK/Large/result' + ' '.join(name.split('_')).title() + '.png')", "def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()", "def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()", "def visualize(**images):\n n = len(images)\n plt.figure(figsize=(16, 5))\n for i, (name, image) in enumerate(images.items()):\n plt.subplot(1, n, i + 1)\n plt.xticks([])\n plt.yticks([])\n plt.title(' '.join(name.split('_')).title())\n plt.imshow(image)\n plt.show()", "def show_images(images, cols = 1, titles = None):\n import matplotlib.pyplot as plt\n import numpy as np\n \n assert((titles is None) or (len(images) == len(titles)))\n n_images = len(images)\n if titles is None: titles = ['Frame %d' % i for i in range(n_images)]\n fig = plt.figure()\n for n, (image, title) in enumerate(zip(images, titles)):\n a = fig.add_subplot(np.ceil(n_images/float(cols)), cols, n + 1)\n if image.ndim == 2:\n plt.gray()\n plt.imshow(image)\n a.set_title(title, size = 50)\n a.axis('off')\n fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)\n plt.show()", "def matplotlibDisplayMulti(imgs, titles=None, colorFlag='gray'):\n if titles is None:\n titles = []\n for i in range(len(imgs)):\n titles.append(\"IMAGE \" + str(i))\n for i in range(len(imgs)):\n plt.subplot(1, len(imgs), 1+i)\n plt.imshow(imgs[i], colorFlag)\n plt.title(titles[i])\n plt.xticks([])\n plt.yticks([])\n plt.show()", "def show_images_pyplot(images, titles, cols=1):\n assert len(images) == len(titles), 'Every image should have unique title!'\n n_images = len(images)\n fig = plt.figure()\n for n, (image, title) in enumerate(zip(images, titles)):\n a = fig.add_subplot(cols, np.ceil(n_images / float(cols)), n + 1)\n if 
image.ndim == 2:\n plt.gray()\n plt.axis('off')\n plt.imshow(image)\n a.set_title(title)\n manager = plt.get_current_fig_manager()\n manager.resize(*manager.window.maxsize())\n plt.show()", "def animate(directory,gifname,n_t,step=2,duration=0.2):\n\t# create list of filenames\n\tfnames = dir_fname(directory,\"*\")\n\t# create list of plots\n\timages=[] \n\tfor k in range(0,n_t):\n\t\tk = k*step\n\t\tprint('Mounting Im '+ str(k))\n\t\tFIG_NAME=fnames[k]\n\t\timages.append(imageio.imread(FIG_NAME)) # read\n\t# Now we can assemble the video\n\timageio.mimsave(gifname, images,duration=duration) # create gif\n\tprint('Animation'+gifname+'Ready')\n\treturn True", "def make_movie_views(self, animation, filename=\"brainmovie%07d.png\", \n offset=0, fps=30, size=(1920, 1080), alpha=1, frame_sleep=0.05,\n frame_start=0, interpolation=\"linear\"):\n allframes = self._get_anim_seq(animation, fps, interpolation)\n for fr, frame in enumerate(allframes[frame_start:], frame_start):\n self._set_view(**frame)\n time.sleep(frame_sleep)\n self.getImage(filename%(fr+offset+1), size=size)\n time.sleep(frame_sleep)", "def show_images_opencv(images, titles):\n assert len(images) == len(titles), 'Every image should have unique title!'\n for img, title in zip(images, titles):\n cv2.imshow(title, img)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def setAnimations(*args):", "def visualize(**images):\r\n n_images = len(images)\r\n plt.figure(figsize=(20, 8))\r\n for idx, (name, image) in enumerate(images.items()):\r\n plt.subplot(1, n_images, idx + 1)\r\n plt.xticks([])\r\n plt.yticks([])\r\n # get title from the parameter names\r\n plt.title(name.replace('_', ' ').title(), fontsize=20)\r\n plt.imshow(image)\r\n plt.show()", "def show_images(images, cols = 1, titles = None):\n params = {'axes.titlesize': 8,\n 'axes.labelsize': 8,\n 'font.size': 8,\n 'legend.fontsize': 8,\n 'xtick.labelsize': 8,\n 'ytick.labelsize': 8,\n 'font.family': 'DejaVu Serif',\n 'font.serif': 'Computer Modern',\n }\n plt.rcParams.update(params)\n assert((titles is None)or (len(images) == len(titles)))\n n_images = len(images)\n if titles is None: titles = ['Image (%d)' % i for i in range(1,n_images + 1)]\n fig = plt.figure()\n plt.title(\"Point Shift Sweeps from -30 to 30\")\n \n for n, (image, title) in enumerate(zip(images, titles)):\n \n a = fig.add_subplot(cols, np.ceil(n_images/float(cols)), n + 1)\n a.get_yaxis().set_visible(False)\n a.get_xaxis().set_visible(False)\n\n if image.ndim == 2:\n plt.gray()\n plt.imshow(image, origin='lower') \n fig.set_size_inches(np.array(fig.get_size_inches()))\n \n\n plt.show()", "def browse_images(images, titles=None):\n\n if titles == None:\n titles = [i for i in range(len(images))]\n\n n = len(images)\n\n def view_image(i):\n plt.imshow(images[i][:, :, ::-1], cmap=plt.cm.gray_r, interpolation='nearest')\n plt.title(titles[i], y=-0.5)\n plt.show()\n\n interact(view_image, i=(1, n - 1))", "def show_images(images, cols=1, titles=None):\n assert ((titles is None) or (len(images) == len(titles)))\n n_images = len(images)\n if titles is None: titles = ['Image (%d)' % i for i in range(1, n_images + 1)]\n fig = plt.figure()\n for n, (image, title) in enumerate(zip(images, titles)):\n a = fig.add_subplot(cols, np.ceil(n_images 
/ float(cols)), n + 1)\n if image.ndim == 2:\n plt.gray()\n plt.imshow(image)\n a.set_title(title)\n fig.set_size_inches(np.array(fig.get_size_inches()) * n_images)\n plt.show()" ]
[ "0.6905455", "0.6872684", "0.68629754", "0.6797282", "0.66781235", "0.6561315", "0.6522247", "0.6522247", "0.6522247", "0.6437156", "0.6422651", "0.6395544", "0.6391225", "0.63497686", "0.63120157", "0.62429947", "0.62429947", "0.62429947", "0.62429947", "0.62429947", "0.62429947", "0.62429947", "0.62429947", "0.62429947", "0.62429947", "0.62429947", "0.6235614", "0.62281764", "0.6216612", "0.62069076" ]
0.76514333
0
Extract the session token from the secret_key field.
def extract_session_from_secret(secret_key, session_token): if secret_key and '@@@' in secret_key and not session_token: return secret_key.split('@@@')[0], secret_key.split('@@@')[1] else: return secret_key, session_token
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_token(self):\n token = self._session.token\n return token", "def _shib_get_token(self): # pragma: no cover\n\n shibCookie = None\n for cookie in self._session.cookies:\n if \"shibsession\" in cookie.name:\n shibCookie = cookie\n break\n\n if not shibCookie:\n warnings.warn(\"No session token found.\", AuthenticationWarning)\n\n return shibCookie", "def parse_token(token):\n return jwt.decode(token, app.config['JWT_SECRET'])", "def get_session_secret():\n singleton = Secrets._get_or_make_singleton()\n return singleton.session_secret", "def get_token(self): # pragma: no cover\n\t\treturn (session.get(\"access_token\"), \"\")", "def peek_app_token():\n if not os.path.exists(_token_storage_path):\n return None\n\n try:\n with open(_token_storage_path) as secret_file:\n return json.loads(secret_file.read())\n\n except Exception as exc:\n log.error(f'Could not read secret file.\\n{exc}')\n traceback.print_exc(file=sys.stderr)", "def getSessionFromJWT(token):\n token_payload = token.split('.')[1]\n\n padded_token = token_payload + \"=\" * divmod(len(token_payload), 4)[1]\n session_data = json.loads(base64.urlsafe_b64decode(padded_token))\n\n return session_data", "def get_session_token(self, server):\n headers = {\"MMSAuth\": self.auth_token, \"MMSAuthSig\": self.auth_sig}\n url = \"https://%s/info/session/token\" % server\n session_token = self.session.get(url, headers=headers).text\n\n return session_token", "def token(self):\n return self[\"token\"]", "def get_token(self):\n\n try:\n return jwt.decode(self.fetch_token(), KEY, algorithms=['HS256'])\n except jwt.exceptions.DecodeError:\n raise InvalidToken", "def get_token(self, session, **kwargs):\n return None", "def env_var_aws_session_token():\n return 'AWS_SESSION_TOKEN'", "def _resolve_secret_token(name, key, model_context):\n global _secret_token_map\n\n if _secret_token_map is None:\n _init_secret_token_map(model_context)\n\n secret_token = name + ':' + key\n return dictionary_utils.get_element(_secret_token_map, secret_token)", "def get_token_from_secret_file(secret_file_path):\n try:\n with open(secret_file_path, \"r\") as f:\n return f.readline()\n except FileNotFoundError:\n raise BaseSpaceDownloadError(\"Secret file not found\")\n except PermissionError:\n raise BaseSpaceDownloadError(\"No permissions to read secret file\")", "def get_token():\n return session.get('microsoft_token')", "def get_token():\n return session.get('microsoft_token')", "def getLastFmSessionKey(self, token):\n \n apiRequest = LastFmApiRequest('auth.getSession', {'token': unicode(token).encode('utf-8')})\n \n logging.debug('sessionKey URL: ' + apiRequest.url())\n \n response = apiRequest.execute()\n root = response.getroot()\n \n sessionKey = root.xpath('//key/text()')[0]\n username = root.xpath('//name/text()')[0]\n \n logging.debug('user:' + username + ' session:' + sessionKey)\n \n # I'm not crazy about the way this couples the mixin and webapp2.RequestHandler\n self.setCookie(self.SESSIONKEY_COOKIE, sessionKey)\n self.setCookie(self.USERNAME_COOKIE, username)", "def decode_token(token):\n\n return jwt.decode(\n token, settings.JWT_SECRET, algorithms=[settings.JWT_ALGO])", "def decode_token(token):\n try:\n # Decode token with our secret key\n payload = jwt.decode(token, SECRET_KEY)\n return payload['sub']\n except jwt.ExpiredSignatureError:\n # token has expired\n return \"Timed out. Please login to get a new token\"\n except jwt.InvalidTokenError:\n return \"Invalid token. 
Please register or login\"", "def token_key(text):\n content2 = str(text.split())\n beginning = content2.find('access_token\":\"') + int(15)\n end = content2.find('token_type') - int(3)\n access_token = content2[beginning:end]\n return access_token", "def token_key(text):\n content2 = str(text.split())\n beginning = content2.find('access_token\":\"') + int(15)\n end = content2.find('token_type') - int(3)\n access_token = content2[beginning:end]\n return access_token", "def token():\n return os.environ.get('TOKEN', None)", "def decode_token(token):\n try:\n payload = jwt.decode(\n token, app.config.get('SECRET_KEY'), algorithms='HS256')\n return payload['sub']\n except jwt.ExpiredSignatureError:\n return \"Expired token. Please login to get a new token\"\n except jwt.InvalidTokenError:\n return \"Invalid token. Please register or login\"", "def get_oauth_token():\n return session.get('remote_oauth')", "def _get_token(self): # pragma: no cover\n\n tokenCookie = None\n for cookie in self._session.cookies:\n if \"mast_token\" in cookie.name:\n tokenCookie = cookie\n break\n\n if not tokenCookie:\n warnings.warn(\"No auth token found.\", AuthenticationWarning)\n\n return tokenCookie", "def decode_token(token):\n try:\n # try to decode the token using our SECRET variable\n payload = jwt.decode(token, app.config.get('SECRET_KEY'), algorithms=['HS256'])\n return payload['sub']\n except jwt.ExpiredSignatureError:\n # the token is expired, return an error string\n return \"Expired token. Please login to get a new token\"\n except jwt.InvalidTokenError:\n # the token is invalid, return an error string\n return \"Invalid token. Please register or login\"", "def _get_token(self):\n return user.get_token()", "def decode(token):\n return jwt.decode(token, app.config[\"JWT_SECRET\"], algorithms=[\"HS256\"])", "def decode_token(token):\n decoded_token = jwt.decode(token, secret_key, algorithms=['HS256'])\n return decoded_token", "def get_payload(cls, token):\n \n secret = cls.secret\n algo = cls.algo\n decoded = jwt.decode(token, secret, algo)\n return decoded" ]
[ "0.6786535", "0.6588912", "0.6564999", "0.6549351", "0.65473354", "0.6426423", "0.64007133", "0.6390965", "0.6321878", "0.63127387", "0.6308861", "0.63074374", "0.62886924", "0.62571794", "0.6218154", "0.6218154", "0.61798644", "0.61466336", "0.61005646", "0.6099376", "0.6099376", "0.60896754", "0.60170996", "0.6011757", "0.6001665", "0.5987707", "0.5979479", "0.5932924", "0.5922859", "0.5914152" ]
0.7777419
0
Testing to do a scrap of consumed material.
def test_manufacturing_scrap(self): # Update demo products (self.product_4 | self.product_2).write({ 'tracking': 'lot', }) # Update Bill Of Material to remove product with phantom bom. self.bom_3.bom_line_ids.filtered(lambda x: x.product_id == self.product_5).unlink() # Create Inventory Adjustment For Stick and Stone Tools with lot. lot_product_4 = self.env['stock.production.lot'].create({ 'name': '0000000000001', 'product_id': self.product_4.id, 'company_id': self.env.company.id, }) lot_product_2 = self.env['stock.production.lot'].create({ 'name': '0000000000002', 'product_id': self.product_2.id, 'company_id': self.env.company.id, }) stock_inv_product_4 = self.env['stock.inventory'].create({ 'name': 'Stock Inventory for Stick', 'product_ids': [(4, self.product_4.id)], 'line_ids': [ (0, 0, {'product_id': self.product_4.id, 'product_uom_id': self.product_4.uom_id.id, 'product_qty': 8, 'prod_lot_id': lot_product_4.id, 'location_id': self.stock_location_14.id}), ]}) stock_inv_product_2 = self.env['stock.inventory'].create({ 'name': 'Stock Inventory for Stone Tools', 'product_ids': [(4, self.product_2.id)], 'line_ids': [ (0, 0, {'product_id': self.product_2.id, 'product_uom_id': self.product_2.uom_id.id, 'product_qty': 12, 'prod_lot_id': lot_product_2.id, 'location_id': self.stock_location_14.id}) ]}) (stock_inv_product_4 | stock_inv_product_2)._action_start() stock_inv_product_2.action_validate() stock_inv_product_4.action_validate() #Create Manufacturing order. production_form = Form(self.env['mrp.production']) production_form.product_id = self.product_6 production_form.bom_id = self.bom_3 production_form.product_qty = 12 production_form.product_uom_id = self.product_6.uom_id production_3 = production_form.save() production_3.action_confirm() production_3.action_assign() # Check Manufacturing order's availability. self.assertEqual(production_3.reservation_state, 'assigned', "Production order's availability should be Available.") location_id = production_3.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel')) and production_3.location_src_id.id or production_3.location_dest_id.id, # Scrap Product Wood without lot to check assert raise ?. scrap_id = self.env['stock.scrap'].with_context(active_model='mrp.production', active_id=production_3.id).create({'product_id': self.product_2.id, 'scrap_qty': 1.0, 'product_uom_id': self.product_2.uom_id.id, 'location_id': location_id, 'production_id': production_3.id}) with self.assertRaises(UserError): scrap_id.do_scrap() # Scrap Product Wood with lot. self.env['stock.scrap'].with_context(active_model='mrp.production', active_id=production_3.id).create({'product_id': self.product_2.id, 'scrap_qty': 1.0, 'product_uom_id': self.product_2.uom_id.id, 'location_id': location_id, 'lot_id': lot_product_2.id, 'production_id': production_3.id}) #Check scrap move is created for this production order. #TODO: should check with scrap objects link in between
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_extract_recipe_from_website(self):\n pass", "def _scrape(self):", "def test_JCB_VISUAL_MATERIALS( self ):\n driver = self.driver\n driver.get(self.base_url + \"/record=b5660654~S6\")\n driver.find_element_by_link_text(\"Request\").click()\n self.assertTrue( 'aeon' in driver.current_url )\n self.assertTrue( 'ReferenceNumber=b5660654' in driver.current_url )\n self.assertTrue( 'ItemTitle=Thomas%20Jefferson' in driver.current_url )\n self.assertTrue( 'ItemAuthor=&ItemPublisher' in driver.current_url )\n self.assertTrue( 'ItemPublisher=Princeton' in driver.current_url )\n self.assertTrue( 'CallNumber=VHS' in driver.current_url )\n # self.assertTrue( 'Notes=(bibnum%3A%20b5660654)' in driver.current_url )\n self.assertEqual( 'ItemInfo2=', driver.current_url[-10:] )", "def test():\n from datas import whatlinks_page\n\n pages = whatlinks_page.whatlinks\n\n for qid in extract_linked_items(pages):\n page = get_itempage(qid)\n try:\n page.get()\n substitute_item_in_dataset(page, get_itempage(\"Q1660508\"), get_itempage(\"Q1622272\") )\n\n except Exception as exc:\n print('wow : <{}> ({}) something is wrong.'.format(exc, type(exc)))", "def test_scrape_multiple(self):\n self.assertEqual(self.scrapes[0].title, 'First article')\n self.assertEqual(self.scrapes[0].content, ['First para', 'Second para'])\n self.assertEqual(self.scrapes[1].title, 'Second article')\n self.assertEqual(self.scrapes[1].content, ['Para 1', 'Para 2'])\n self.assertEqual(self.scrapes[2].title, 'Third article')\n self.assertEqual(self.scrapes[2].content, ['Thing one', 'Thing two'])", "def test_scrape_multiple(self):\n self.assertEqual(self.scrapes[0].title, 'First article')\n self.assertEqual(self.scrapes[0].content, ['First para', 'Second para'])\n self.assertEqual(self.scrapes[1].title, 'Second article')\n self.assertEqual(self.scrapes[1].content, ['Para 1', 'Para 2'])\n self.assertEqual(self.scrapes[2].title, 'Third article')\n self.assertEqual(self.scrapes[2].content, ['Thing one', 'Thing two'])", "def test_scrape(self):\n self.assertEqual(self.scraped.title, 'Heading!')\n self.assertEqual(self.scraped.link_text, 'Go to Google')\n self.assertEqual(self.scraped.link_url, 'http://Google.com')", "def test_get_recipe_information(self):\n pass", "def test_create_material_multi_over(self):\n expected_materials_01 = [\n ['cotton'],\n ['cotton'],\n ['wool', 'AAA', 'BBB', 'CCC'],\n ]\n\n expected_materials_02 = [\n ['cotton', '00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11'],\n ['cotton', '00', '01', '02', '03', '04', '05', '06', '07', '08', '09', '10', '11'],\n ['wool', 'AAA', 'BBB', 'CCC', '00', '01', '02', '03', '04', '05', '06', '07', '08'],\n ]\n\n select_listings_to_edit(self.driver)\n d = self.driver\n bp = BulkPage(d)\n\n # deselect 2, 3\n bp.click_on_listings(['Second something 1235 (2)', 'Third something LG-512a (3)'])\n\n # append AAA BBB CCC materials to the 1st listing\n send_keys(bp.operation_input(), 'AAA,BBB ,CCC')\n click(bp.operation_apply())\n\n material_names = bp.material_names()\n assert material_names == expected_materials_01\n\n # append 00, 01, 02... 
to all listings\n bp.click_on_listings(['Second something 1235 (2)', 'Third something LG-512a (3)'])\n send_keys(bp.operation_input(), '00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, 13, 14, 15')\n click(bp.operation_apply())\n\n material_names = bp.material_names()\n assert material_names == expected_materials_02", "def scrape(self):\n pass", "def test_scrape_results(self):\n self.assertIsInstance(self.scrapes, EntityList)\n self.assertEqual(len(self.scrapes), 3)\n self.assertEqual([s.title for s in self.scrapes[1:]], ['Second article', 'Third article'])", "def test_scrape_results(self):\n self.assertIsInstance(self.scrapes, EntityList)\n self.assertEqual(len(self.scrapes), 3)\n self.assertEqual([s.title for s in self.scrapes[1:]], ['Second article', 'Third article'])", "def test_cards_get(self):\n pass", "def test_get_art_info(self):\n pass", "def test_gethardwares_item(self):\n pass", "def test_get_analyzed_recipe_instructions(self):\n pass", "def test_direct_usage(self):\n r = RscHtmlReader()\n fname = '10.1039_C6OB02074G.html'\n f = io.open(os.path.join(os.path.dirname(__file__), 'data', 'rsc', fname), 'rb')\n content = f.read()\n d = r.readstring(content)\n self.assertEqual(len(d.elements), 60)", "def parse(self, response):\n self.driver.get(response.url)\n product_category=response.meta[\"category_text\"]\n products=response.xpath(\"//*[(@class='list-item')]\")\n \n # item containers for storing product\n items = CrawlingECommerceItem()\n \n # iterating over search results\n # for product in products:\n # # Defining the XPaths\n # XPATH_PRODUCT_LINK=\".//*[contains(concat( ' ', @class, ' ' ), concat( ' ', 'goods-tit', ' ' ))]//a\"\n # XPATH_PRODUCT_NAME=\".//div[@class='goods-introudce']//a/@href\"\n # XPATH_PRODUCT_PRICE=\".//div[@class='catalog-detail']//div[@class='detail-right']//p/text()\"\n # XPATH_PRODUCT_IMAGE_LINK=\".//img\"\n\n # raw_product_name=product.xpath(XPATH_PRODUCT_NAME).get()\n # raw_product_price=product.xpath(XPATH_PRODUCT_PRICE).get()\n # raw_product_image_link=product.xpath(XPATH_PRODUCT_IMAGE_LINK).extract()\n # raw_product_link=product.xpath(XPATH_PRODUCT_LINK).get()\n\n # # cleaning the data\n # product_name=''.join(raw_product_name).strip(\n # ) if raw_product_name else None\n # product_price=''.join(raw_product_price).strip(\n # ) if raw_product_price else None\n # product_image_link=''.join(raw_product_image_link).strip(\n # ) if raw_product_image_link else None\n # product_link=''.join(raw_product_link).strip(\n # ) if raw_product_link else None\n\n # # storing item\n # yield CrawlingECommerceItem (\n # product_name=product_name,\n # product_price=product_price,\n # product_url=product_link,\n # product_category=product_category,\n # image_urls=raw_product_image_link\n # )\n\n # # yield items\n \n # XPATH_PRAGINATION_LINK=\"//*[(@class='next right')]/a/@href\"\n\n yield response.follow(str(response.request.url), callback = self.parse, meta = {\"category_text\": product_category})", "def test_create_material(self):\n expected_materials = [\n ['cotton', 'AAA'],\n ['cotton', 'AAA'],\n ['wool', 'AAA'],\n ]\n\n select_listings_to_edit(self.driver)\n d = self.driver\n bp = BulkPage(d)\n\n send_keys(bp.operation_input(), 'AAA')\n click(bp.operation_apply())\n\n material_names = bp.material_names()\n assert material_names == expected_materials\n\n apply_class = bp.operation_apply().get_attribute('class')\n assert 'inactive' in apply_class.split(' ')", "def test_analyze_recipe_instructions(self):\n pass", "def test_display_review(self):\n\n result = 
self.client.get(\"/brand/P87985432\")\n self.assertIn(b\"ever ever\", result.data)", "def test_search_recipes(self):\n pass", "def test_gettem_using_get(self):\n pass", "def test_parse_valid(self):\n mock_scraper = MockCtdScraper()\n scrape_gen = mock_scraper.scrape(TEST_CHUNKSIZE)\n self.parser.parse(next(scrape_gen))", "def test_collect_demands(self):\n pass", "def test_private_fetch_law(self, mock_law_pages):\n\n with requests_mock.mock() as mock_requests:\n test_text = (self.input_html.rstrip('</div>') +\n \"\"\"><table><a href=\"gigi\">gigi</table>\n<table><tbody><a href=\"gogo\">gogo</table></tbody></div>\"\"\")\n mock_requests.get(agc_law.FIRST_PAGE, text=test_text, status_code=200)\n mock_storage = Mock()\n mock_law_pages.return_value = \"hulahoop\"\n sys.stdout = captured = StringIO()\n self.law._fetch_law(('test', agc_law.FIRST_PAGE), mock_storage)\n self.assertEqual(captured.getvalue(),\n 'Requesting page test\\n')\n self.assertIn(call('hulahoop'), mock_storage.extend.call_args_list)\n sys.stdout = sys.__stdout__", "def test_can_start_a_list_and_retrieve_it_later(self):\n self.browser.get('http://localhost:8000')\n\n # Ela notou que o título e o cabeçalho da página diz Listas de Tarefas\n self.assertIn('Listas de Tarefas', self.browser.title)\n self.fail('Finish the test!')\n\n # Ela é imediatamente convidada a colocar uma tarefa na lista\n\n # Ela digita \"Comprar coxinhas\" em uma caixa de texto (Edith é uma\n # PyLady)\n\n # Quando ela tecla Enter, a página atualiza, e agora a página lista\n # \"1: Comprar coxinha\" como um item em uma lista de tarefas\n\n # Ainda tem uma caixa de texto convidando-a a adicionar um outro item.\n # Ela digita \"Comer as coxinhas\" (Edith é muito metódica)\n\n # A página atualiza novamente, e agora mostra ambos os itens na lista\n # dela\n\n # Edith se pergunta se o site lembrará da sua lista. Então ela vê que o\n # site gerou uma URL única para ela - Tem um texto explicativo para\n # esse efeito.\n\n # Ela visita a URL - Sua lista de tarefas ainda está lá.", "def test_get_scan(self):\n pass", "def test_get_random_recipes(self):\n pass", "def test_get_scans(self):\n pass" ]
[ "0.64861304", "0.60452384", "0.60298216", "0.6010472", "0.6002515", "0.6002515", "0.5930737", "0.59229994", "0.5865279", "0.5826006", "0.5802774", "0.5802774", "0.56953853", "0.568463", "0.56727046", "0.56668943", "0.56484073", "0.56198096", "0.56014913", "0.5600095", "0.5565852", "0.5529819", "0.5511538", "0.5501881", "0.55010056", "0.54968446", "0.54958725", "0.5483249", "0.54816705", "0.54626405" ]
0.68666345
0
This test checks a tracked manufactured product will go to location defined in putaway strategy when the production is recorded with product.produce wizard.
def test_putaway_after_manufacturing_3(self): self.laptop.tracking = 'serial' mo_laptop = self.new_mo_laptop() serial = self.env['stock.production.lot'].create({'product_id': self.laptop.id, 'company_id': self.env.company.id}) mo_form = Form(mo_laptop) mo_form.qty_producing = 1 mo_form.lot_producing_id = serial mo_laptop = mo_form.save() mo_laptop.button_mark_done() # We check if the laptop go in the depot and not in the stock move = mo_laptop.move_finished_ids location_dest = move.move_line_ids.location_dest_id self.assertEqual(location_dest.id, self.depot_location.id) self.assertNotEqual(location_dest.id, self.stock_location.id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_generate_with_putaway(self):\n nbre_of_lines = 4\n shelf_location = self.env['stock.location'].create({\n 'name': 'shelf1',\n 'usage': 'internal',\n 'location_id': self.location_dest.id,\n })\n\n # Checks a first time without putaway...\n move = self.get_new_move(nbre_of_lines)\n form_wizard = Form(self.env['stock.assign.serial'].with_context(\n default_move_id=move.id,\n ))\n form_wizard.next_serial_count = nbre_of_lines\n form_wizard.next_serial_number = '001'\n wiz = form_wizard.save()\n wiz.generate_serial_numbers()\n\n for move_line in move.move_line_nosuggest_ids:\n self.assertEqual(move_line.qty_done, 1)\n # The location dest must be the default one.\n self.assertEqual(move_line.location_dest_id.id, self.location_dest.id)\n\n # We need to activate multi-locations to use putaway rules.\n grp_multi_loc = self.env.ref('stock.group_stock_multi_locations')\n self.env.user.write({'groups_id': [(4, grp_multi_loc.id)]})\n # Creates a putaway rule\n putaway_product = self.env['stock.putaway.rule'].create({\n 'product_id': self.product_serial.id,\n 'location_in_id': self.location_dest.id,\n 'location_out_id': shelf_location.id,\n })\n\n # Checks now with putaway...\n move = self.get_new_move(nbre_of_lines)\n form_wizard = Form(self.env['stock.assign.serial'].with_context(\n default_move_id=move.id,\n ))\n form_wizard.next_serial_count = nbre_of_lines\n form_wizard.next_serial_number = '001'\n wiz = form_wizard.save()\n wiz.generate_serial_numbers()\n\n for move_line in move.move_line_nosuggest_ids:\n self.assertEqual(move_line.qty_done, 1)\n # The location dest must be now the one from the putaway.\n self.assertEqual(move_line.location_dest_id.id, shelf_location.id)", "def test_manufacturing_scrap(self):\n\n # Update demo products\n (self.product_4 | self.product_2).write({\n 'tracking': 'lot',\n })\n\n # Update Bill Of Material to remove product with phantom bom.\n self.bom_3.bom_line_ids.filtered(lambda x: x.product_id == self.product_5).unlink()\n\n # Create Inventory Adjustment For Stick and Stone Tools with lot.\n lot_product_4 = self.env['stock.production.lot'].create({\n 'name': '0000000000001',\n 'product_id': self.product_4.id,\n 'company_id': self.env.company.id,\n })\n lot_product_2 = self.env['stock.production.lot'].create({\n 'name': '0000000000002',\n 'product_id': self.product_2.id,\n 'company_id': self.env.company.id,\n })\n\n stock_inv_product_4 = self.env['stock.inventory'].create({\n 'name': 'Stock Inventory for Stick',\n 'product_ids': [(4, self.product_4.id)],\n 'line_ids': [\n (0, 0, {'product_id': self.product_4.id, 'product_uom_id': self.product_4.uom_id.id, 'product_qty': 8, 'prod_lot_id': lot_product_4.id, 'location_id': self.stock_location_14.id}),\n ]})\n\n stock_inv_product_2 = self.env['stock.inventory'].create({\n 'name': 'Stock Inventory for Stone Tools',\n 'product_ids': [(4, self.product_2.id)],\n 'line_ids': [\n (0, 0, {'product_id': self.product_2.id, 'product_uom_id': self.product_2.uom_id.id, 'product_qty': 12, 'prod_lot_id': lot_product_2.id, 'location_id': self.stock_location_14.id})\n ]})\n (stock_inv_product_4 | stock_inv_product_2)._action_start()\n stock_inv_product_2.action_validate()\n stock_inv_product_4.action_validate()\n\n #Create Manufacturing order.\n production_form = Form(self.env['mrp.production'])\n production_form.product_id = self.product_6\n production_form.bom_id = self.bom_3\n production_form.product_qty = 12\n production_form.product_uom_id = self.product_6.uom_id\n production_3 = production_form.save()\n 
production_3.action_confirm()\n production_3.action_assign()\n\n # Check Manufacturing order's availability.\n self.assertEqual(production_3.reservation_state, 'assigned', \"Production order's availability should be Available.\")\n\n location_id = production_3.move_raw_ids.filtered(lambda x: x.state not in ('done', 'cancel')) and production_3.location_src_id.id or production_3.location_dest_id.id,\n\n # Scrap Product Wood without lot to check assert raise ?.\n scrap_id = self.env['stock.scrap'].with_context(active_model='mrp.production', active_id=production_3.id).create({'product_id': self.product_2.id, 'scrap_qty': 1.0, 'product_uom_id': self.product_2.uom_id.id, 'location_id': location_id, 'production_id': production_3.id})\n with self.assertRaises(UserError):\n scrap_id.do_scrap()\n\n # Scrap Product Wood with lot.\n self.env['stock.scrap'].with_context(active_model='mrp.production', active_id=production_3.id).create({'product_id': self.product_2.id, 'scrap_qty': 1.0, 'product_uom_id': self.product_2.uom_id.id, 'location_id': location_id, 'lot_id': lot_product_2.id, 'production_id': production_3.id})\n\n #Check scrap move is created for this production order.\n #TODO: should check with scrap objects link in between", "def test_buy_now(self):\n catalog_page = CatalogPage(self.driver)\n product_page = ProductPage(self.driver)\n payment_page = PaymentPage(self.driver)\n payment_review_page = PaymentReviewPage(self.driver)\n payment_info_page = PaymentInfoPage(self.driver)\n success_page = SuccessPage(self.driver)\n # buy the new product\n navigate_to(self.driver, ProductPage.URL(self.new_product['product']['title']))\n product_page.add_to_cart.click()\n # by an old product\n catalog_page.catalog.click()\n # Sort products to move the newly created to last page\n catalog_page.sorting_order.select_by_visible_text(\"Date, old to new\")\n catalog_page.image.random_click()\n product = product_page.product.get_text()\n product_page.add_to_cart.click()\n catalog_page.catalog.click()\n catalog_page.cart.click()\n payment_dic = {\n 'address' : f'{randint(1, 99999)} {random_name(5, 8)}',\n 'city' : \"San Francisco\",\n 'email_or_mobile_phone_number_input' : random_name(8) + \"@gmail.com\",\n 'last_name' : random_name(3, 12),\n 'zip_code' : '94107',\n }\n if randint(0, 1):\n payment_dic['first_name'] = random_name(4, 16)\n if randint(0, 1):\n payment_dic['address2'] = random_name(5)\n for _ in payment_dic:\n exec(f\"payment_page.{_}.enter(payment_dic['{_}'])\")\n payment_page.continue_to_shipping.click()\n payment_review_page.continue_to_payment.click()\n payment_info_page.full_address.get_text()\n # validate address\n for _ in ['address', 'city', 'zip_code']:\n assert_and_log(payment_dic[_] in payment_info_page.full_address.get_text(),\n f\"{_} in full address\")\n payment_info_page.enter_bogus_payment(1)\n assert_and_log(success_page.thank_you.find_visible_element(),\n \"'Thank you' appeared as a sign of successful transaction\",\n continue_on_error=False)\n validate(success_page.basic_validation_list)", "def test_single_quant_non_default_locations(self):\n pick = self.quant_1.create_picking(\n self.picking_type_pick,\n location_id=self.test_stock_location_01.id,\n location_dest_id=self.test_goodsout_location_02.id,\n )\n # Confirm default location used if non specified\n self.assertEqual(pick.location_id, self.test_stock_location_01)\n self.assertNotEqual(pick.location_id, self.picking_type_pick.default_location_src_id)\n # Confirm default dest location used if non specified\n 
self.assertEqual(pick.location_dest_id, self.test_goodsout_location_02)\n self.assertNotEqual(pick.location_id, self.picking_type_pick.default_location_dest_id)", "def test_updateLocationInL4NewInL3(self):\n sel = self.selenium\n # Login\n self.login()\n # Load the Shelter\n self.open_record(\"Shelter within L4 Location\")\n\n # Check that the location is set\n self.assertEqual(\"Specific Location in L4\", sel.get_table(\"//div[@id='rheader']/div/table.1.1\"))\n location = sel.get_attribute(\"//a[starts-with(@onclick, 's3_viewMap')]/@onclick\")\n location_id = location.split(\"(\")[1].split(\")\")[0]\n self.assertEqual(location_id, sel.get_value(\"cr_shelter_location_id\"))\n # Check that the dropdown is set\n self.assertEqual(location_id, sel.get_value(\"gis_location_\"))\n\n # Check that the components which should be visible, are\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_label_\"))\n self.failUnless(sel.is_visible(\"gis_location_details-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n\n # Select the L3\n sel.select(\"gis_location_L3\", \"label=Turgeau\")\n # Check that L4 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Babiole\", sel.get_table(\"//div[@id='content']/div[2]/form/table.17.0\"))\n\n # Click on the Add button\n sel.click(\"gis_location_add-btn\")\n # Check that the components appear correctly\n self.failUnless(sel.is_visible(\"gis_location_name\"))\n self.failUnless(sel.is_visible(\"gis_location_name_label\"))\n self.failUnless(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failUnless(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failUnless(sel.is_visible(\"gis_location_map-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_advanced_div\"))\n # Check that components which should remain invisible, are\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n\n # Fill in a Name & Address\n sel.type(\"gis_location_name\", \"New in L3\")\n sel.type(\"gis_location_addr_street\", \"5 Ruelle Chochotte\")\n\n # Open the Advanced Tab\n sel.click(\"gis_location_advanced_checkbox\")\n # Check that the components appear correctly\n self.failUnless(sel.is_visible(\"gis_location_lat_row\"))\n 
self.failUnless(sel.is_visible(\"gis_location_lon_row\"))\n\n # Fill in Lat & Lon\n sel.type(\"gis_location_lat\", \"18.53171116\")\n sel.type(\"gis_location_lon\", \"-72.33020758\")\n\n # Save the form (with changes)\n sel.click(\"//input[@value='Save']\")\n sel.wait_for_page_to_load(\"30000\")\n # Shelter saved\n self.action.successMsg(\"Shelter updated\")\n # Shelter has correct location\n self.assertEqual(\"New in L3 (N 18.53171116 W -72.33020758)\", sel.get_table(\"//div[@id='rheader']/div/table.1.1\"))\n\n # Load again\n self.open_record(\"Shelter within L4 Location\")\n # Check that the location is set\n self.assertEqual(\"New in L3 (N 18.53171116 W -72.33020758)\", sel.get_table(\"//div[@id='rheader']/div/table.1.1\"))\n location = sel.get_attribute(\"//a[starts-with(@onclick, 's3_viewMap')]/@onclick\")\n location_id = location.split(\"(\")[1].split(\")\")[0]\n self.assertEqual(location_id, sel.get_value(\"cr_shelter_location_id\"))\n # Check that the dropdown is set\n self.assertEqual(location_id, sel.get_value(\"gis_location_\"))\n\n # Check that the components which should be visible, are\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L2\"))\n self.failUnless(sel.is_visible(\"gis_location_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L3\"))\n self.failUnless(sel.is_visible(\"gis_location_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L4\"))\n self.failUnless(sel.is_visible(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_label_\"))\n self.failUnless(sel.is_visible(\"gis_location_details-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n\n # Click on 'Details' button\n sel.click(\"gis_location_details-btn\")\n # Check that the components which should be visible, are\n self.failUnless(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failUnless(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failUnless(sel.is_visible(\"gis_location_map-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_advanced_div\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_lat_row\"))\n self.failIf(sel.is_visible(\"gis_location_lon_row\"))\n\n # Check that the Street Address is populated\n self.assertEqual(\"5 Ruelle Chochotte\", sel.get_text(\"gis_location_addr_street\"))\n\n # Open the Advanced Tab\n sel.click(\"gis_location_advanced_checkbox\")\n # Check that the components appear correctly\n self.failUnless(sel.is_visible(\"gis_location_lat_row\"))\n 
self.failUnless(sel.is_visible(\"gis_location_lon_row\"))\n\n # Check that the Lat/Lon are populated\n self.assertEqual(\"18.53171116\", sel.get_value(\"gis_location_lat\"))\n self.assertEqual(\"-72.33020758\", sel.get_value(\"gis_location_lon\"))", "def test_locationInL0(self):\n sel = self.selenium\n # Login\n self.login()\n self.create_header()\n # Fill in the mandatory fields\n sel.type(\"cr_shelter_name\", \"Shelter within L0 Location\")\n # Select the L0\n sel.select(\"gis_location_L0\", \"label=Haiti\")\n # Check that L1 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Ouest\", sel.get_table(\"//div[@id='content']/div[2]/form/table.11.0\"))\n # Create a new location\n sel.click(\"gis_location_add-btn\")\n sel.type(\"gis_location_name\", \"Specific Location in L0\")\n # Save the form\n sel.click(\"//input[@value='Save']\")\n sel.wait_for_page_to_load(\"30000\")\n # Shelter saved\n self.action.successMsg(\"Shelter added\")\n # Shelter has correct location\n self.assertEqual(\"Specific Location in L0\", sel.get_table(\"//div[@id='rheader']/div/table.1.1\"))", "def test_update_goal(self):\n pass", "def test_locationInL2(self):\n sel = self.selenium\n # Login\n self.login()\n self.create_header()\n # Fill in the mandatory fields\n sel.type(\"cr_shelter_name\", \"Shelter within L2 Location\")\n # Select the L0\n sel.select(\"gis_location_L0\", \"label=Haiti\")\n # Check that L1 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Ouest\", sel.get_table(\"//div[@id='content']/div[2]/form/table.11.0\"))\n # Select the L1\n sel.select(\"gis_location_L1\", \"label=Ouest\")\n # Check that L2 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Port-Au-Prince\", sel.get_table(\"//div[@id='content']/div[2]/form/table.13.0\"))\n # Select the L2\n sel.select(\"gis_location_L2\", \"label=Port-Au-Prince\")\n # Check that L3 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...MartissantTurgeau\", sel.get_table(\"//div[@id='content']/div[2]/form/table.15.0\"))\n # Create a new location\n sel.click(\"gis_location_add-btn\")\n sel.type(\"gis_location_name\", \"Specific Location in L2\")\n # Save the form\n sel.click(\"//input[@value='Save']\")\n sel.wait_for_page_to_load(\"30000\")\n # Shelter saved\n self.action.successMsg(\"Shelter added\")\n # Shelter has correct location\n self.assertEqual(\"Specific Location in L2\", sel.get_table(\"//div[@id='rheader']/div/table.1.1\"))", "def test_product_produce_3_with_analytic(self):\n acc = self.env['account.account'].search([('code', '=', '115.01.01')])\n self.env.ref('product.product_category_all').write(\n {'property_valuation': 'real_time',\n 'property_stock_account_input_categ_id': acc.id,\n 'property_stock_account_output_categ_id': acc.id})\n self.test_product_produce_3()\n move_obj = self.env['account.move']\n self.assertTrue(\n (self.last_mo.account_analytic_in_id |\n self.last_mo.account_analytic_out_id),\n 'The MO does not have account analytic')\n for fmove in self.last_mo.move_finished_ids:\n # Checking analytic account\n am = move_obj.search([('stock_move_id', '=', fmove.id)])\n # Looking for the Journal entry related\n self.assertTrue(am, 'The move does not have an entry related')\n line = am.line_ids.filtered(\n lambda a: a.analytic_account_id ==\n self.last_mo.account_analytic_out_id)\n # It should have only one line with the same analytic\n self.assertEqual(\n len(line), 1,\n 'There is more than one line with the 
same analytic')\n # The line must have debit\n self.assertTrue(line.credit > 0)\n\n for fmove in self.last_mo.move_raw_ids:\n # Checking analytic account\n am = move_obj.search([('stock_move_id', '=', fmove.id)])\n # Looking for the Journal entry related\n self.assertTrue(am, 'The move does not have an entry related')\n line = am.line_ids.filtered(\n lambda a: a.analytic_account_id ==\n self.last_mo.account_analytic_in_id)\n # It should have only one line with the same analytic\n self.assertEqual(\n len(line), 1,\n 'There is more than one line with the same analytic')\n # The line must have credit\n self.assertTrue(line.debit > 0)", "def test_locationInL3(self):\n sel = self.selenium\n # Login\n self.login()\n self.create_header()\n # Fill in the mandatory fields\n sel.type(\"cr_shelter_name\", \"Shelter within L3 Location\")\n # Select the L0\n sel.select(\"gis_location_L0\", \"label=Haiti\")\n # Check that L1 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Ouest\", sel.get_table(\"//div[@id='content']/div[2]/form/table.11.0\"))\n # Select the L1\n sel.select(\"gis_location_L1\", \"label=Ouest\")\n # Check that L2 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Port-Au-Prince\", sel.get_table(\"//div[@id='content']/div[2]/form/table.13.0\"))\n # Select the L2\n sel.select(\"gis_location_L2\", \"label=Port-Au-Prince\")\n # Check that L3 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...MartissantTurgeau\", sel.get_table(\"//div[@id='content']/div[2]/form/table.15.0\"))\n # Select the L3\n sel.select(\"gis_location_L3\", \"label=Martissant\")\n # Check that L4 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Carrefour Feuilles\", sel.get_table(\"//div[@id='content']/div[2]/form/table.17.0\"))\n # Create a new location\n sel.click(\"gis_location_add-btn\")\n sel.type(\"gis_location_name\", \"Specific Location in L3\")\n # Save the form\n sel.click(\"//input[@value='Save']\")\n sel.wait_for_page_to_load(\"30000\")\n # Shelter saved\n self.action.successMsg(\"Shelter added\")\n # Shelter has correct location\n self.assertEqual(\"Specific Location in L3\", sel.get_table(\"//div[@id='rheader']/div/table.1.1\"))", "def test_locationL2(self):\n sel = self.selenium\n # Login\n self.login()\n self.create_header()\n # Fill in the mandatory fields\n sel.type(\"cr_shelter_name\", \"Shelter with an L2 Location\")\n # Select the L0\n sel.select(\"gis_location_L0\", \"label=Haiti\")\n # Check that L1 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Ouest\", sel.get_table(\"//div[@id='content']/div[2]/form/table.11.0\"))\n # Select the L1\n sel.select(\"gis_location_L1\", \"label=Ouest\")\n # Check that L2 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Port-Au-Prince\", sel.get_table(\"//div[@id='content']/div[2]/form/table.13.0\"))\n # Select the L2\n sel.select(\"gis_location_L2\", \"label=Port-Au-Prince\")\n # Check that L3 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...MartissantTurgeau\", sel.get_table(\"//div[@id='content']/div[2]/form/table.15.0\"))\n # Save the form\n sel.click(\"//input[@value='Save']\")\n sel.wait_for_page_to_load(\"30000\")\n # Shelter saved\n self.action.successMsg(\"Shelter added\")\n # Shelter has correct location\n self.assertEqual(\"Port-Au-Prince (Ouest)\", sel.get_table(\"//div[@id='rheader']/div/table.1.1\"))", "def 
test_locationInL1(self):\n sel = self.selenium\n # Login\n self.login()\n self.create_header()\n # Fill in the mandatory fields\n sel.type(\"cr_shelter_name\", \"Shelter within L1 Location\")\n # Select the L0\n sel.select(\"gis_location_L0\", \"label=Haiti\")\n # Check that L1 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Ouest\", sel.get_table(\"//div[@id='content']/div[2]/form/table.11.0\"))\n # Select the L1\n sel.select(\"gis_location_L1\", \"label=Ouest\")\n # Check that L2 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Port-Au-Prince\", sel.get_table(\"//div[@id='content']/div[2]/form/table.13.0\"))\n # Create a new location\n sel.click(\"gis_location_add-btn\")\n sel.type(\"gis_location_name\", \"Specific Location in L1\")\n # Save the form\n sel.click(\"//input[@value='Save']\")\n sel.wait_for_page_to_load(\"30000\")\n # Shelter saved\n self.action.successMsg(\"Shelter added\")\n # Shelter has correct location\n self.assertEqual(\"Specific Location in L1\", sel.get_table(\"//div[@id='rheader']/div/table.1.1\"))", "def test_locationL0(self):\n sel = self.selenium\n # Login\n self.login()\n self.create_header()\n # Fill in the mandatory fields\n sel.type(\"cr_shelter_name\", \"Shelter with an L0 Location\")\n # Select the L0\n sel.select(\"gis_location_L0\", \"label=Haiti\")\n # Check that L1 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Ouest\", sel.get_table(\"//div[@id='content']/div[2]/form/table.11.0\"))\n # Save the form\n sel.click(\"//input[@value='Save']\")\n sel.wait_for_page_to_load(\"30000\")\n # Shelter saved\n self.action.successMsg(\"Shelter added\")\n # Shelter has correct location\n self.assertEqual(\"Haiti\", sel.get_table(\"//div[@id='rheader']/div/table.1.1\"))", "def test_updateLocationEmptyL0(self):\n sel = self.selenium\n # Login\n self.login()\n # Load the Shelter\n self.open_record(\"Shelter with no Location\")\n # Check that the location is currently blank\n self.check_blank()\n\n # Save the form (without changes)\n sel.click(\"//input[@value='Save']\")\n sel.wait_for_page_to_load(\"30000\")\n # Shelter saved\n self.action.successMsg(\"Shelter updated\")\n # Shelter has correct location\n self.assertEqual(\"-\", sel.get_table(\"//div[@id='rheader']/div/table.1.1\"))\n\n # Load again\n self.open_record(\"Shelter with no Location\")\n # Check that the location is still blank\n self.check_blank()\n # Select the L0\n sel.select(\"gis_location_L0\", \"label=Haiti\")\n # Check that L1 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Ouest\", sel.get_table(\"//div[@id='content']/div[2]/form/table.11.0\"))\n # Save the form (with changes)\n sel.click(\"//input[@value='Save']\")\n sel.wait_for_page_to_load(\"30000\")\n # Shelter saved\n self.action.successMsg(\"Shelter updated\")\n # Shelter has correct location\n self.assertEqual(\"Haiti\", sel.get_table(\"//div[@id='rheader']/div/table.1.1\"))\n\n # Load again\n self.open_record(\"Shelter with no Location\")\n # Check that the location is set\n self.assertEqual(\"Haiti\", sel.get_table(\"//div[@id='rheader']/div/table.1.1\"))\n location = sel.get_attribute(\"//a[starts-with(@onclick, 's3_viewMap')]/@onclick\")\n location_id = location.split(\"(\")[1].split(\")\")[0]\n self.assertEqual(location_id, sel.get_value(\"cr_shelter_location_id\"))\n # Check that the dropdown is set\n self.assertEqual(location_id, sel.get_value(\"gis_location_L0\"))\n\n # Check that the 
components which should be visible, are\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_L2\"))\n self.failIf(sel.is_visible(\"gis_location_label_L2\"))\n self.failIf(sel.is_visible(\"gis_location_L3\"))\n self.failIf(sel.is_visible(\"gis_location_label_L3\"))\n self.failIf(sel.is_visible(\"gis_location_L4\"))\n self.failIf(sel.is_visible(\"gis_location_label_L4\"))\n self.failIf(sel.is_visible(\"gis_location_\"))\n self.failIf(sel.is_visible(\"gis_location_label_\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_details-btn\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))", "def test_locationL3(self):\n sel = self.selenium\n # Login\n self.login()\n self.create_header()\n # Fill in the mandatory fields\n sel.type(\"cr_shelter_name\", \"Shelter with an L3 Location\")\n # Select the L0\n sel.select(\"gis_location_L0\", \"label=Haiti\")\n # Check that L1 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Ouest\", sel.get_table(\"//div[@id='content']/div[2]/form/table.11.0\"))\n sel.select(\"gis_location_L1\", \"label=Ouest\")\n # Check that L2 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Port-Au-Prince\", sel.get_table(\"//div[@id='content']/div[2]/form/table.13.0\"))\n # Select the L2\n sel.select(\"gis_location_L2\", \"label=Port-Au-Prince\")\n # Check that L3 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...MartissantTurgeau\", sel.get_table(\"//div[@id='content']/div[2]/form/table.15.0\"))\n # Select the L3\n sel.select(\"gis_location_L3\", \"label=Martissant\")\n # Check that L4 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Carrefour Feuilles\", sel.get_table(\"//div[@id='content']/div[2]/form/table.17.0\"))\n # Save the form\n sel.click(\"//input[@value='Save']\")\n sel.wait_for_page_to_load(\"30000\")\n # Shelter saved\n self.action.successMsg(\"Shelter added\")\n # Shelter has correct location\n self.assertEqual(\"Martissant (Port-Au-Prince)\", sel.get_table(\"//div[@id='rheader']/div/table.1.1\"))", "def test_locationL1(self):\n sel = self.selenium\n # Login\n self.login()\n self.create_header()\n # Fill in the mandatory fields\n sel.type(\"cr_shelter_name\", \"Shelter with an L1 Location\")\n # Select the L0\n sel.select(\"gis_location_L0\", \"label=Haiti\")\n # Check that L1 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Ouest\", sel.get_table(\"//div[@id='content']/div[2]/form/table.11.0\"))\n # Select the L1\n sel.select(\"gis_location_L1\", \"label=Ouest\")\n # Check that L2 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Port-Au-Prince\", sel.get_table(\"//div[@id='content']/div[2]/form/table.13.0\"))\n 
# Save the form\n sel.click(\"//input[@value='Save']\")\n sel.wait_for_page_to_load(\"30000\")\n # Shelter saved\n self.action.successMsg(\"Shelter added\")\n # Shelter has correct location\n self.assertEqual(\"Ouest (Haiti)\", sel.get_table(\"//div[@id='rheader']/div/table.1.1\"))", "def test_households_in_admin_unit(self):", "def test_update_location_premium(self):\n\n url = reverse('location-detail', args=(self.location.id,))\n data = {\n 'point': 200,\n }\n json_data = json.dumps(data)\n self.client.credentials(HTTP_AUTHORIZATION=self.token)\n response = self.client.put(path=url, content_type='application/json', data=json_data)\n self.assertEqual(first=400, second=response.status_code)", "def test_post_activate_marketplace_vendor_v3(self):\n pass", "def setUp(self):\n self.location = Location.get(TEST_LOCATION)", "def test_updateLocationL0Empty(self):\n sel = self.selenium\n # Login\n self.login()\n # Load the Shelter\n self.open_record(\"Shelter with no Location\")\n # Check that the location is currently set\n self.assertEqual(\"Haiti\", sel.get_table(\"//div[@id='rheader']/div/table.1.1\"))\n location = sel.get_attribute(\"//a[starts-with(@onclick, 's3_viewMap')]/@onclick\")\n location_id = location.split(\"(\")[1].split(\")\")[0]\n self.assertEqual(location_id, sel.get_value(\"cr_shelter_location_id\"))\n # Check that the dropdown is set\n self.assertEqual(location_id, sel.get_value(\"gis_location_L0\"))\n\n # Check that the components which should be visible, are\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L1\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_L2\"))\n self.failIf(sel.is_visible(\"gis_location_label_L2\"))\n self.failIf(sel.is_visible(\"gis_location_L3\"))\n self.failIf(sel.is_visible(\"gis_location_label_L3\"))\n self.failIf(sel.is_visible(\"gis_location_L4\"))\n self.failIf(sel.is_visible(\"gis_location_label_L4\"))\n self.failIf(sel.is_visible(\"gis_location_\"))\n self.failIf(sel.is_visible(\"gis_location_label_\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_details-btn\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n\n # De-select the L0\n sel.select(\"gis_location_L0\", \"label=Select a location...\")\n # Check that the real location has been set to blank\n self.assertEqual(\"\", sel.get_value(\"cr_shelter_location_id\"))\n # Check that L1 dropdown disappears correctly\n time.sleep(1)\n self.failIf(sel.is_visible(\"gis_location_L1\"))\n self.failIf(sel.is_visible(\"gis_location_label_L1\"))\n # Save the form (with changes)\n sel.click(\"//input[@value='Save']\")\n sel.wait_for_page_to_load(\"30000\")\n # Shelter saved\n self.action.successMsg(\"Shelter updated\")\n # Shelter has correct location\n self.assertEqual(\"-\", sel.get_table(\"//div[@id='rheader']/div/table.1.1\"))", "def test_kit_planned_transfer(self):\n picking = 
self.env['stock.picking'].create({\n 'location_id': self.test_supplier.id,\n 'location_dest_id': self.warehouse_1.wh_input_stock_loc_id.id,\n 'partner_id': self.test_partner.id,\n 'picking_type_id': self.env.ref('stock.picking_type_in').id,\n 'immediate_transfer': False,\n })\n move_receipt_1 = self.env['stock.move'].create({\n 'name': self.kit_parent.name,\n 'product_id': self.kit_parent.id,\n 'product_uom_qty': 3,\n 'product_uom': self.kit_parent.uom_id.id,\n 'picking_id': picking.id,\n 'picking_type_id': self.env.ref('stock.picking_type_in').id,\n 'location_id': self.test_supplier.id,\n 'location_dest_id': self.warehouse_1.wh_input_stock_loc_id.id,\n })\n picking.action_confirm()\n\n # We check that the picking has the correct quantities after its move were splitted.\n self.assertEqual(len(picking.move_lines), 7)\n for move_line in picking.move_lines:\n self.assertEqual(move_line.product_qty, self.expected_quantities[move_line.product_id])", "def test_locationInL4(self):\n sel = self.selenium\n # Login\n self.login()\n self.create_header()\n # Fill in the mandatory fields\n sel.type(\"cr_shelter_name\", \"Shelter within L4 Location\")\n # Select the L0\n sel.select(\"gis_location_L0\", \"label=Haiti\")\n # Check that L1 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Ouest\", sel.get_table(\"//div[@id='content']/div[2]/form/table.11.0\"))\n # Select the L1\n sel.select(\"gis_location_L1\", \"label=Ouest\")\n # Check that L2 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Port-Au-Prince\", sel.get_table(\"//div[@id='content']/div[2]/form/table.13.0\"))\n # Select the L2\n sel.select(\"gis_location_L2\", \"label=Port-Au-Prince\")\n # Check that L3 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...MartissantTurgeau\", sel.get_table(\"//div[@id='content']/div[2]/form/table.15.0\"))\n # Select the L3\n sel.select(\"gis_location_L3\", \"label=Martissant\")\n # Check that L4 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Carrefour Feuilles\", sel.get_table(\"//div[@id='content']/div[2]/form/table.17.0\"))\n # Select the L4\n sel.select(\"gis_location_L4\", \"label=Carrefour Feuilles\")\n # Check that specific location dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Clinique Communautaire de Martissant\", sel.get_table(\"//div[@id='content']/div[2]/form/table.19.0\"))\n # Create a new location\n sel.click(\"gis_location_add-btn\")\n sel.type(\"gis_location_name\", \"Specific Location in L4\")\n # Save the form\n sel.click(\"//input[@value='Save']\")\n sel.wait_for_page_to_load(\"30000\")\n # Shelter saved\n self.action.successMsg(\"Shelter added\")\n # Shelter has correct location\n self.assertEqual(\"Specific Location in L4\", sel.get_table(\"//div[@id='rheader']/div/table.1.1\"))", "def test_search_product_detail(self, setup):\n product_id = self.nutella.id\n path = reverse('website:detail', args=(product_id,))\n assert resolve(path).view_name == 'website:detail'", "def test_updateLocationNoParentL0(self):\n sel = self.selenium\n # Login\n self.login()\n # Load the Shelter\n self.open_record(\"Shelter with no Parent\")\n\n # Check that the location is set\n self.assertEqual(\"New parentless Location (N 51.0 E 1.0)\", sel.get_table(\"//div[@id='rheader']/div/table.1.1\"))\n location = sel.get_attribute(\"//a[starts-with(@onclick, 's3_viewMap')]/@onclick\")\n location_id = location.split(\"(\")[1].split(\")\")[0]\n 
self.assertEqual(location_id, sel.get_value(\"cr_shelter_location_id\"))\n # Check that the dropdown is set\n self.assertEqual(location_id, sel.get_value(\"gis_location_\"))\n\n # Check that the components which should be visible, are\n self.failUnless(sel.is_visible(\"gis_location_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_label_L0\"))\n self.failUnless(sel.is_visible(\"gis_location_\"))\n self.failUnless(sel.is_visible(\"gis_location_label_\"))\n self.failUnless(sel.is_visible(\"gis_location_details-btn\"))\n self.failUnless(sel.is_visible(\"gis_location_add-btn\"))\n # Check that the components which should be hidden, are\n self.failIf(sel.is_visible(\"cr_shelter_location_id\"))\n self.failIf(sel.is_visible(\"gis_location_L1\"))\n self.failIf(sel.is_visible(\"gis_location_label_L1\"))\n self.failIf(sel.is_visible(\"gis_location_L2\"))\n self.failIf(sel.is_visible(\"gis_location_label_L2\"))\n self.failIf(sel.is_visible(\"gis_location_L3\"))\n self.failIf(sel.is_visible(\"gis_location_label_L3\"))\n self.failIf(sel.is_visible(\"gis_location_L4\"))\n self.failIf(sel.is_visible(\"gis_location_label_L4\"))\n self.failIf(sel.is_visible(\"gis_location_name\"))\n self.failIf(sel.is_visible(\"gis_location_name_label\"))\n self.failIf(sel.is_visible(\"gis_location_cancel-btn\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_row\"))\n self.failIf(sel.is_visible(\"gis_location_addr_street_label\"))\n self.failIf(sel.is_visible(\"gis_location_map-btn\"))\n self.failIf(sel.is_visible(\"gis_location_advanced_div\"))\n\n # Select the L0\n sel.select(\"gis_location_L0\", \"label=Haiti\")\n # Check that L1 dropdown appears correctly\n time.sleep(4)\n self.assertEqual(\"Select a location...Ouest\", sel.get_table(\"//div[@id='content']/div[2]/form/table.11.0\"))\n # Save the form (with changes)\n sel.click(\"//input[@value='Save']\")\n sel.wait_for_page_to_load(\"30000\")\n # Shelter saved\n self.action.successMsg(\"Shelter updated\")\n # Shelter has correct location\n self.assertEqual(\"Haiti\", sel.get_table(\"//div[@id='rheader']/div/table.1.1\"))", "def test_create_goal(self):\n pass", "def test_create_shipment(self):\n pass", "def test_training_location(self):\n self.assertIsInstance(self.one_off_training.location, Location)\n self.assertEqual(self.one_off_training.location, self.location)", "def test_plant_harvest():\n plant = plant_factory()\n user = plant.user\n plant.user_active = user\n plant.harvest()\n assert plant.dead\n assert plant.user_active is None\n assert plant.get(user_active=user)", "def test_post_voltage_maps(self):\n pass" ]
[ "0.68906146", "0.6246047", "0.6090114", "0.6084227", "0.6050358", "0.6022465", "0.5951435", "0.592476", "0.59018815", "0.5867885", "0.58634603", "0.58565384", "0.58413273", "0.58121234", "0.58035403", "0.5710238", "0.5670679", "0.5666164", "0.564475", "0.5602386", "0.5602123", "0.5572282", "0.55630136", "0.5558942", "0.5558823", "0.55381566", "0.5525468", "0.54971665", "0.5489381", "0.5486223" ]
0.7612206
0
Make sure a kit is split in the corrects quantity_done by components in case of an immediate transfer.
def test_kit_immediate_transfer(self): picking = self.env['stock.picking'].create({ 'location_id': self.test_supplier.id, 'location_dest_id': self.warehouse_1.wh_input_stock_loc_id.id, 'partner_id': self.test_partner.id, 'picking_type_id': self.env.ref('stock.picking_type_in').id, 'immediate_transfer': True }) move_receipt_1 = self.env['stock.move'].create({ 'name': self.kit_parent.name, 'product_id': self.kit_parent.id, 'quantity_done': 3, 'product_uom': self.kit_parent.uom_id.id, 'picking_id': picking.id, 'picking_type_id': self.env.ref('stock.picking_type_in').id, 'location_id': self.test_supplier.id, 'location_dest_id': self.warehouse_1.wh_input_stock_loc_id.id, }) picking.button_validate() # We check that the picking has the correct quantities after its move were splitted. self.assertEqual(len(picking.move_lines), 7) for move_line in picking.move_lines: self.assertEqual(move_line.quantity_done, self.expected_quantities[move_line.product_id])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_kit_planned_transfer(self):\n picking = self.env['stock.picking'].create({\n 'location_id': self.test_supplier.id,\n 'location_dest_id': self.warehouse_1.wh_input_stock_loc_id.id,\n 'partner_id': self.test_partner.id,\n 'picking_type_id': self.env.ref('stock.picking_type_in').id,\n 'immediate_transfer': False,\n })\n move_receipt_1 = self.env['stock.move'].create({\n 'name': self.kit_parent.name,\n 'product_id': self.kit_parent.id,\n 'product_uom_qty': 3,\n 'product_uom': self.kit_parent.uom_id.id,\n 'picking_id': picking.id,\n 'picking_type_id': self.env.ref('stock.picking_type_in').id,\n 'location_id': self.test_supplier.id,\n 'location_dest_id': self.warehouse_1.wh_input_stock_loc_id.id,\n })\n picking.action_confirm()\n\n # We check that the picking has the correct quantities after its move were splitted.\n self.assertEqual(len(picking.move_lines), 7)\n for move_line in picking.move_lines:\n self.assertEqual(move_line.product_qty, self.expected_quantities[move_line.product_id])", "def _check_overprocessed_subcontract_qty(self):\n overprocessed_moves = self.env['stock.move']\n for move in self:\n if not move.is_subcontract:\n continue\n # Extra quantity is allowed when components do not need to be register\n if not move._has_tracked_subcontract_components():\n continue\n rounding = move.product_uom.rounding\n if float_compare(move.quantity_done, move.move_orig_ids.production_id.qty_produced, precision_rounding=rounding) > 0:\n overprocessed_moves |= move\n if overprocessed_moves:\n raise UserError(_(\"\"\"\nYou have to use 'Records Components' button in order to register quantity for a\nsubcontracted product(s) with tracked component(s):\n %s.\nIf you want to process more than initially planned, you\ncan use the edit + unlock buttons in order to adapt the initial demand on the\noperations.\"\"\") % ('\\n'.join(overprocessed_moves.mapped('product_id.display_name'))))", "def set_so_pack_operation_lot(self, picking):\n StockProductionLot = self.env['stock.production.lot']\n sale_line_obj = self.env['sale.order.line']\n has_wrong_lots = False\n for del_move in picking.move_lines:\n del_move.move_line_ids.unlink()\n for move in picking.move_lines:\n picking_type = picking.picking_type_id\n # lots_necessary = True\n if picking_type:\n if not picking_type.use_existing_lots:\n picking_type.write({'use_existing_lots':True})\n # lots_necessary = picking_type and picking_type.use_existing_lots\n qty = 0\n qty_done = 0\n pack_lots = []\n pack_lot_id = []\n for ord_line in self.order_line:\n if ord_line.lot_id and ord_line.lot_id.product_id.id == move.product_id.id:\n pack_lot_id.append(ord_line.lot_id.id)\n # if pack_lot_names and lots_necessary:\n if pack_lot_id:\n for lot_id in list(set(pack_lot_id)):\n stock_production_lot = StockProductionLot.search([('id', '=', lot_id), ('product_id', '=', move.product_id.id)])\n sale_order_line = sale_line_obj.search([('lot_id', '=', lot_id),('order_id', '=', self.id), ('product_id', '=', move.product_id.id)])\n if stock_production_lot and sale_order_line:\n if stock_production_lot.product_id.tracking == 'lot':\n # if a lot nr is set through the frontend it will refer to the full quantity\n qty = sale_order_line[0].product_uom_qty\n else:\n qty = 1.0\n qty_done += qty\n pack_lots.append({'lot_id': stock_production_lot.id, 'qty': qty})\n else:\n has_wrong_lots = True\n # elif move.product_id.tracking == 'none' or not lots_necessary:\n elif move.product_id.tracking == 'none':\n qty_done = move.product_uom_qty\n else:\n has_wrong_lots = True\n for 
pack_lot in pack_lots:\n lot_id, qty = pack_lot['lot_id'], pack_lot['qty']\n self.env['stock.move.line'].create({\n 'move_id': move.id,\n 'product_id': move.product_id.id,\n 'product_uom_id': move.product_uom.id,\n 'qty_done': qty,\n 'location_id': move.location_id.id,\n 'location_dest_id': move.location_dest_id.id,\n 'lot_id': lot_id,\n })\n if not pack_lots:\n move.quantity_done = qty_done\n return has_wrong_lots", "def stock_move_action_done(self):\n\t\tself.filtered(lambda move: move.state == 'draft').action_confirm()\n\n\t\tUom = self.env['product.uom']\n\t\tQuant = self.env['stock.quant']\n\n\t\tpickings = self.env['stock.picking']\n\t\tprocurements = self.env['procurement.order']\n\t\toperations = self.env['stock.pack.operation']\n\n\t\tremaining_move_qty = {}\n\n\t\tfor move in self:\n\t\t\tif move.picking_id:\n\t\t\t\tpickings |= move.picking_id\n\t\t\tremaining_move_qty[move.id] = move.product_qty\n\t\t\tfor link in move.linked_move_operation_ids:\n\t\t\t\toperations |= link.operation_id\n\t\t\t\tpickings |= link.operation_id.picking_id\n\n\t\t# Sort operations according to entire packages first, then package + lot, package only, lot only\n\t\toperations = operations.sorted(\n\t\t\tkey=lambda x: ((x.package_id and not x.product_id) and -4 or 0) + (x.package_id and -2 or 0) + (\n\t\t\tx.pack_lot_ids and -1 or 0))\n\n\t\tfor operation in operations:\n\n\t\t\t# product given: result put immediately in the result package (if False: without package)\n\t\t\t# but if pack moved entirely, quants should not be written anything for the destination package\n\t\t\tquant_dest_package_id = operation.product_id and operation.result_package_id.id or False\n\t\t\tentire_pack = not operation.product_id and True or False\n\n\t\t\t# compute quantities for each lot + check quantities match\n\t\t\tlot_quantities = dict((pack_lot.lot_id.id, operation.product_uom_id._compute_quantity(pack_lot.qty,\n\t\t\t operation.product_id.uom_id)\n\t\t\t ) for pack_lot in operation.pack_lot_ids)\n\n\t\t\tqty = operation.product_qty\n\t\t\tif operation.product_uom_id and operation.product_uom_id != operation.product_id.uom_id:\n\t\t\t\tqty = operation.product_uom_id._compute_quantity(qty, operation.product_id.uom_id)\n\t\t\tif operation.pack_lot_ids and float_compare(sum(lot_quantities.values()), qty,\n\t\t\t precision_rounding=operation.product_id.uom_id.rounding) != 0.0:\n\t\t\t\traise UserError(_(\n\t\t\t\t\t'You have a difference between the quantity on the operation and the quantities specified for the lots. '))\n\n\t\t\tquants_taken = []\n\t\t\tfalse_quants = []\n\t\t\tlot_move_qty = {}\n\n\t\t\tprout_move_qty = {}\n\t\t\tfor link in operation.linked_move_operation_ids:\n\t\t\t\tprout_move_qty[link.move_id] = prout_move_qty.get(link.move_id, 0.0) + link.qty\n\n\t\t\t# Process every move only once for every pack operation\n\t\t\tfor move in prout_move_qty.keys():\n\t\t\t\t# TDE FIXME: do in batch ?\n\t\t\t\tmove.check_tracking(operation)\n\n\t\t\t\t# TDE FIXME: I bet the message error is wrong\n\t\t\t\t# if not remaining_move_qty.get(move.id):\n\t\t\t\t# \traise UserError(_(\n\t\t\t\t# \t\t\"The roundings of your unit of measure %s on the move vs. %s on the product don't allow to do these operations or you are not transferring the picking at once. 
\") % (\n\t\t\t\t# \t move.product_uom.name, move.product_id.uom_id.name))\n\n\t\t\t\tif not operation.pack_lot_ids:\n\t\t\t\t\tpreferred_domain_list = [[('reservation_id', '=', move.id)], [('reservation_id', '=', False)],\n\t\t\t\t\t ['&', ('reservation_id', '!=', move.id),\n\t\t\t\t\t ('reservation_id', '!=', False)]]\n\t\t\t\t\tquants = Quant.quants_get_preferred_domain(\n\t\t\t\t\t\tprout_move_qty[move], move, ops=operation, domain=[('qty', '>', 0)],\n\t\t\t\t\t\tpreferred_domain_list=preferred_domain_list)\n\t\t\t\t\tQuant.quants_move(quants, move, operation.location_dest_id, location_from=operation.location_id,\n\t\t\t\t\t lot_id=False, owner_id=operation.owner_id.id,\n\t\t\t\t\t src_package_id=operation.package_id.id,\n\t\t\t\t\t dest_package_id=quant_dest_package_id, entire_pack=entire_pack)\n\t\t\t\telse:\n\t\t\t\t\t# Check what you can do with reserved quants already\n\t\t\t\t\tqty_on_link = prout_move_qty[move]\n\t\t\t\t\trounding = operation.product_id.uom_id.rounding\n\t\t\t\t\tfor reserved_quant in move.reserved_quant_ids:\n\t\t\t\t\t\tif (reserved_quant.owner_id.id != operation.owner_id.id) or (\n\t\t\t\t\t\t\treserved_quant.location_id.id != operation.location_id.id) or \\\n\t\t\t\t\t\t\t\t(reserved_quant.package_id.id != operation.package_id.id):\n\t\t\t\t\t\t\tcontinue\n\t\t\t\t\t\tif not reserved_quant.lot_id:\n\t\t\t\t\t\t\tfalse_quants += [reserved_quant]\n\t\t\t\t\t\telif float_compare(lot_quantities.get(reserved_quant.lot_id.id, 0), 0,\n\t\t\t\t\t\t precision_rounding=rounding) > 0:\n\t\t\t\t\t\t\tif float_compare(lot_quantities[reserved_quant.lot_id.id], reserved_quant.qty,\n\t\t\t\t\t\t\t precision_rounding=rounding) >= 0:\n\t\t\t\t\t\t\t\tlot_quantities[reserved_quant.lot_id.id] -= reserved_quant.qty\n\t\t\t\t\t\t\t\tquants_taken += [(reserved_quant, reserved_quant.qty)]\n\t\t\t\t\t\t\t\tqty_on_link -= reserved_quant.qty\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tquants_taken += [(reserved_quant, lot_quantities[reserved_quant.lot_id.id])]\n\t\t\t\t\t\t\t\tlot_quantities[reserved_quant.lot_id.id] = 0\n\t\t\t\t\t\t\t\tqty_on_link -= lot_quantities[reserved_quant.lot_id.id]\n\t\t\t\t\tlot_move_qty[move.id] = qty_on_link\n\n\t\t\t\tremaining_move_qty[move.id] -= prout_move_qty[move]\n\n\t\t\t# Handle lots separately\n\t\t\tif operation.pack_lot_ids:\n\t\t\t\t# TDE FIXME: fix call to move_quants_by_lot to ease understanding\n\t\t\t\tself._move_quants_by_lot(operation, lot_quantities, quants_taken, false_quants, lot_move_qty,\n\t\t\t\t quant_dest_package_id)\n\n\t\t\t# Handle pack in pack\n\t\t\tif not operation.product_id and operation.package_id and operation.result_package_id.id != operation.package_id.parent_id.id:\n\t\t\t\toperation.package_id.sudo().write({'parent_id': operation.result_package_id.id})\n\n\t\t# Check for remaining qtys and unreserve/check move_dest_id in\n\t\tmove_dest_ids = set()\n\t\tfor move in self:\n\t\t\tif float_compare(remaining_move_qty[move.id], 0,\n\t\t\t precision_rounding=move.product_id.uom_id.rounding) > 0: # In case no pack operations in picking\n\t\t\t\tmove.check_tracking(False) # TDE: do in batch ? redone ? 
check this\n\n\t\t\t\tpreferred_domain_list = [[('reservation_id', '=', move.id)], [('reservation_id', '=', False)],\n\t\t\t\t ['&', ('reservation_id', '!=', move.id), ('reservation_id', '!=', False)]]\n\t\t\t\tquants = Quant.quants_get_preferred_domain(\n\t\t\t\t\tremaining_move_qty[move.id], move, domain=[('qty', '>', 0)],\n\t\t\t\t\tpreferred_domain_list=preferred_domain_list)\n\t\t\t\tQuant.quants_move(\n\t\t\t\t\tquants, move, move.location_dest_id,\n\t\t\t\t\tlot_id=move.restrict_lot_id.id, owner_id=move.restrict_partner_id.id)\n\n\t\t\t# If the move has a destination, add it to the list to reserve\n\t\t\tif move.move_dest_id and move.move_dest_id.state in ('waiting', 'confirmed'):\n\t\t\t\tmove_dest_ids.add(move.move_dest_id.id)\n\n\t\t\tif move.procurement_id:\n\t\t\t\tprocurements |= move.procurement_id\n\n\t\t\t# unreserve the quants and make them available for other operations/moves\n\t\t\tmove.quants_unreserve()\n\n\t\t# Check the packages have been placed in the correct locations\n\t\tself.mapped('quant_ids').filtered(lambda quant: quant.package_id and quant.qty > 0).mapped(\n\t\t\t'package_id')._check_location_constraint()\n\n\t\t# set the move as done\n\t\tself.write({'state': 'done', 'date': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})\n\t\tprocurements.check()\n\t\t# assign destination moves\n\t\tif move_dest_ids:\n\t\t\t# TDE FIXME: record setise me\n\t\t\tself.browse(list(move_dest_ids)).action_assign_stock_move()\n\n\t\tpickings.filtered(lambda picking: picking.state == 'done' and not picking.date_done).write(\n\t\t\t{'date_done': time.strftime(DEFAULT_SERVER_DATETIME_FORMAT)})\n\n\t\treturn True", "def ingredient_used_canceled(self, item, quantity):\n logger.info('ReleaseDiscard ingredient used canceled initiated')\n try:\n quantity = Decimal(quantity).quantize(Decimal('0.11'))\n inventory_list = self.Inventory.search([('location', '=', self.used.id)]\n , order=[('batch_number', 'DESC')])\n product = self.Product.search([('name', '=', item),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n done = False\n today = date.today()\n for i in inventory_list:\n for j in i.lines:\n if j.product.template.name == item:\n expiry = j.expiry_date\n if expiry:\n if expiry >= today:\n # pdb.set_trace()\n if Decimal(j.quantity) >= Decimal(quantity):\n j.quantity = Decimal(j.quantity) - Decimal(quantity)\n self.move(from_location=self.used, to_location=self.kitchen, item=product,\n quantity=quantity,\n batch_number=i.batch_number)\n self.store_inventory(location=self.kitchen, inventory_stock=j,\n quantity=quantity, batch=i.batch_number)\n j.save()\n self.check_and_delete(i)\n done = True\n else:\n quantity = Decimal(quantity) - Decimal(j.quantity)\n self.move(from_location=self.used, to_location=self.kitchen, item=product,\n quantity=j.quantity, batch_number=i.batch_number)\n self.store_inventory(location=self.kitchen, inventory_stock=j,\n quantity=j.quantity, batch=i.batch_number)\n j.quantity = 0\n j.save()\n self.check_and_delete(i)\n # transaction.cursor.commit()\n i.save()\n if done:\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def _action_done(self):\r\n\r\n # First, we loop over all the move lines to do a preliminary check: `qty_done` should not\r\n # be negative and, according to the presence of a picking type or a linked inventory\r\n # adjustment, enforce some rules on the `lot_id` field. If `qty_done` is null, we unlink\r\n # the line. 
It is mandatory in order to free the reservation and correctly apply\r\n # `action_done` on the next move lines.\r\n ml_to_delete = self.env['stock.move.line']\r\n for ml in self:\r\n # Check here if `ml.qty_done` respects the rounding of `ml.product_uom_id`.\r\n uom_qty = float_round(ml.qty_done, precision_rounding=ml.product_uom_id.rounding, rounding_method='HALF-UP')\r\n precision_digits = self.env['decimal.precision'].precision_get('Product Unit of Measure')\r\n qty_done = float_round(ml.qty_done, precision_digits=precision_digits, rounding_method='HALF-UP')\r\n if float_compare(uom_qty, qty_done, precision_digits=precision_digits) != 0:\r\n raise UserError(_('The quantity done for the product \"%s\" doesn\\'t respect the rounding precision \\\r\n defined on the unit of measure \"%s\". Please change the quantity done or the \\\r\n rounding precision of your unit of measure.') % (ml.product_id.display_name, ml.product_uom_id.name))\r\n\r\n qty_done_float_compared = float_compare(ml.qty_done, 0, precision_rounding=ml.product_uom_id.rounding)\r\n if qty_done_float_compared > 0:\r\n if ml.product_id.tracking != 'none':\r\n picking_type_id = ml.move_id.picking_type_id\r\n if picking_type_id:\r\n if picking_type_id.use_create_lots:\r\n # If a picking type is linked, we may have to create a production lot on\r\n # the fly before assigning it to the move line if the user checked both\r\n # `use_create_lots` and `use_existing_lots`.\r\n if ml.lot_name and ml.date_reference and not ml.lot_id:\r\n lot = self.env['stock.production.lot'].create(\r\n {'name': ml.lot_name, 'product_id': ml.product_id.id, 'date_refer': ml.date_reference}\r\n )\r\n ml.write({'lot_id': lot.id})\r\n data_dates = ml.lot_id._get_dattes(ml.product_id.id,ml.date_reference)\r\n for field, value in data_dates.items():\r\n setattr(ml.lot_id, field, value)\r\n elif not picking_type_id.use_create_lots and not picking_type_id.use_existing_lots:\r\n # If the user disabled both `use_create_lots` and `use_existing_lots`\r\n # checkboxes on the picking type, he's allowed to enter tracked\r\n # products without a `lot_id`.\r\n continue\r\n elif ml.move_id.inventory_id:\r\n # If an inventory adjustment is linked, the user is allowed to enter\r\n # tracked products without a `lot_id`.\r\n continue\r\n\r\n if not ml.lot_id:\r\n raise UserError(_('You need to supply a lot/serial number for %s.') % ml.product_id.name)\r\n elif qty_done_float_compared < 0:\r\n raise UserError(_('No negative quantities allowed'))\r\n else:\r\n ml_to_delete |= ml\r\n ml_to_delete.unlink()\r\n\r\n # Now, we can actually move the quant.\r\n done_ml = self.env['stock.move.line']\r\n for ml in self - ml_to_delete:\r\n if ml.product_id.type == 'product':\r\n Quant = self.env['stock.quant']\r\n rounding = ml.product_uom_id.rounding\r\n\r\n # if this move line is force assigned, unreserve elsewhere if needed\r\n if not ml.location_id.should_bypass_reservation() and float_compare(ml.qty_done, ml.product_qty, precision_rounding=rounding) > 0:\r\n extra_qty = ml.qty_done - ml.product_qty\r\n ml._free_reservation(ml.product_id, ml.location_id, extra_qty, lot_id=ml.lot_id, package_id=ml.package_id, owner_id=ml.owner_id, ml_to_ignore=done_ml)\r\n # unreserve what's been reserved\r\n if not ml.location_id.should_bypass_reservation() and ml.product_id.type == 'product' and ml.product_qty:\r\n try:\r\n Quant._update_reserved_quantity(ml.product_id, ml.location_id, -ml.product_qty, lot_id=ml.lot_id, package_id=ml.package_id, owner_id=ml.owner_id, strict=True)\r\n except 
UserError:\r\n Quant._update_reserved_quantity(ml.product_id, ml.location_id, -ml.product_qty, lot_id=False, package_id=ml.package_id, owner_id=ml.owner_id, strict=True)\r\n\r\n # move what's been actually done\r\n quantity = ml.product_uom_id._compute_quantity(ml.qty_done, ml.move_id.product_id.uom_id, rounding_method='HALF-UP')\r\n available_qty, in_date = Quant._update_available_quantity(ml.product_id, ml.location_id, -quantity, lot_id=ml.lot_id, package_id=ml.package_id, owner_id=ml.owner_id)\r\n if available_qty < 0 and ml.lot_id:\r\n # see if we can compensate the negative quants with some untracked quants\r\n untracked_qty = Quant._get_available_quantity(ml.product_id, ml.location_id, lot_id=False, package_id=ml.package_id, owner_id=ml.owner_id, strict=True)\r\n if untracked_qty:\r\n taken_from_untracked_qty = min(untracked_qty, abs(quantity))\r\n Quant._update_available_quantity(ml.product_id, ml.location_id, -taken_from_untracked_qty, lot_id=False, package_id=ml.package_id, owner_id=ml.owner_id)\r\n Quant._update_available_quantity(ml.product_id, ml.location_id, taken_from_untracked_qty, lot_id=ml.lot_id, package_id=ml.package_id, owner_id=ml.owner_id)\r\n Quant._update_available_quantity(ml.product_id, ml.location_dest_id, quantity, lot_id=ml.lot_id, package_id=ml.result_package_id, owner_id=ml.owner_id, in_date=in_date)\r\n done_ml |= ml\r\n # Reset the reserved quantity as we just moved it to the destination location.\r\n (self - ml_to_delete).with_context(bypass_reservation_update=True).write({\r\n 'product_uom_qty': 0.00,\r\n 'date': fields.Datetime.now(),\r\n })", "def test_putaway_after_manufacturing_3(self):\n self.laptop.tracking = 'serial'\n mo_laptop = self.new_mo_laptop()\n serial = self.env['stock.production.lot'].create({'product_id': self.laptop.id, 'company_id': self.env.company.id})\n\n mo_form = Form(mo_laptop)\n mo_form.qty_producing = 1\n mo_form.lot_producing_id = serial\n mo_laptop = mo_form.save()\n mo_laptop.button_mark_done()\n\n # We check if the laptop go in the depot and not in the stock\n move = mo_laptop.move_finished_ids\n location_dest = move.move_line_ids.location_dest_id\n self.assertEqual(location_dest.id, self.depot_location.id)\n self.assertNotEqual(location_dest.id, self.stock_location.id)", "def ingredient_used(self, item, quantity):\n logger.info('ReleaseDiscard ingredient used initiated')\n try:\n quantity = Decimal(quantity).quantize(Decimal('0.11'))\n inventory_list = self.Inventory.search([('location', '=', self.kitchen.id)]\n , order=[('batch_number', 'ASC')])\n product = self.Product.search([('name', '=', item),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n done = False\n today = date.today()\n for i in inventory_list:\n for j in i.lines:\n if j.product.template.name == item:\n expiry = j.expiry_date\n if expiry:\n if expiry >= today:\n if Decimal(j.quantity) >= Decimal(quantity):\n j.quantity = Decimal(j.quantity) - Decimal(quantity)\n self.move(from_location=self.kitchen, to_location=self.used, item=product,\n quantity=quantity,\n batch_number=i.batch_number)\n self.store_inventory(location=self.used, inventory_stock=j,\n quantity=quantity, batch=i.batch_number)\n j.save()\n self.check_and_delete(i)\n done = True\n else:\n quantity = Decimal(quantity) - Decimal(j.quantity)\n self.move(from_location=self.kitchen, to_location=self.used, item=product,\n quantity=j.quantity, batch_number=i.batch_number)\n self.store_inventory(location=self.used, inventory_stock=j,\n quantity=j.quantity, batch=i.batch_number)\n 
j.quantity = 0\n j.save()\n self.check_and_delete(i)\n # transaction.cursor.commit()\n i.save()\n if done:\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def test_excess_quantity(self):\n excess = self._uncertain_demand.excess_stock\n avg_order = sum([int(item) for item in self._data_set.values()]) //len(self._data_set)\n variance = [(item - avg_order) for item in self._data_set.values()]\n stdev = pow(sum([pow(j, 2) for j in variance]) / len(self._data_set), 0.5)\n cal_safety = lambda x, y, z: x * y * (z ** 0.5)\n safety_stock = cal_safety(float(self._z_value), float(stdev), float(self._lead_time))\n cal_reorder_level = lambda x, y, z: ((x ** 0.5) * y) + z\n reorder = cal_reorder_level(float(self._lead_time), avg_order, float(safety_stock))\n cal_excess = lambda x, y, z: round(x - (y + (y - z)), 0) if x > y + (y - z) else 0\n test_excess = cal_excess(self._quantity_on_hand, reorder, safety_stock)\n self.assertEqual(int(excess), int(test_excess))", "def _is_order_filled(self):\r\n if self.filled_quantity == self.quantity:\r\n self.order_finish()", "def test_generate_04_generate_in_multiple_time(self):\n nbre_of_lines = 10\n move = self.get_new_move(nbre_of_lines)\n\n form_wizard = Form(self.env['stock.assign.serial'].with_context(\n default_move_id=move.id,\n ))\n # First assignment\n form_wizard.next_serial_count = 3\n form_wizard.next_serial_number = '001'\n wiz = form_wizard.save()\n wiz.generate_serial_numbers()\n # Second assignment\n form_wizard.next_serial_count = 2\n form_wizard.next_serial_number = 'bilou-64'\n wiz = form_wizard.save()\n wiz.generate_serial_numbers()\n # Third assignment\n form_wizard.next_serial_count = 4\n form_wizard.next_serial_number = 'ro-1337-bot'\n wiz = form_wizard.save()\n wiz.generate_serial_numbers()\n\n # Checks all move lines have the right SN\n generated_numbers = [\n # Correspond to the first assignment\n '001', '002', '003',\n # Correspond to the second assignment\n 'bilou-64', 'bilou-65',\n # Correspond to the third assignment\n 'ro-1337-bot', 'ro-1338-bot', 'ro-1339-bot', 'ro-1340-bot',\n ]\n self.assertEqual(len(move.move_line_ids), nbre_of_lines + len(generated_numbers))\n self.assertEqual(len(move.move_line_nosuggest_ids), len(generated_numbers))\n for move_line in move.move_line_nosuggest_ids:\n self.assertEqual(move_line.qty_done, 1)\n self.assertEqual(move_line.lot_name, generated_numbers.pop(0))\n for move_line in (move.move_line_ids - move.move_line_nosuggest_ids):\n self.assertEqual(move_line.qty_done, 0)\n self.assertEqual(move_line.lot_name, False)", "def test_initial_risk_position_sizer_without_cap(self):\n fraction_at_risk = 0.23\n signal = Signal(self.ticker, Exposure.LONG, fraction_at_risk, self.last_price, self.timer.now())\n orders = self.initial_risk_position_sizer.size_signals([signal])\n\n self.assertEqual(len(orders), 2) # market order and stop order\n portfolio_value = self.initial_position / self.initial_allocation\n target_quantity = float(np.floor(portfolio_value * self.initial_risk / fraction_at_risk))\n additional_contracts = target_quantity - self.initial_position\n self.assertEqual(orders[0], Order(self.ticker, additional_contracts, MarketOrder(), TimeInForce.OPG))\n\n stop_price = self.last_price * (1 - fraction_at_risk)\n stop_quantity = -(self.initial_position + additional_contracts)\n self.assertEqual(orders[1], Order(self.ticker, stop_quantity, StopOrder(stop_price), TimeInForce.GTC))", "def test_product_buy_more_then_have(self):\n result_buy 
= self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)", "def test_initial_risk_position_sizer_without_cap(self):\n fraction_at_risk = 0.23\n signal = Signal(self.ticker, Exposure.LONG, fraction_at_risk)\n orders = self.initial_risk_position_sizer.size_signals([signal])\n\n self.assertEqual(len(orders), 2) # market order and stop order\n portfolio_value = self.initial_position / self.initial_allocation\n target_quantity = int(np.floor(portfolio_value * self.initial_risk / fraction_at_risk))\n additional_contracts = target_quantity - self.initial_position\n self.assertEqual(orders[0], Order(self.contract, additional_contracts, MarketOrder(), TimeInForce.OPG))\n\n stop_price = self.last_price * (1 - fraction_at_risk)\n stop_quantity = -(self.initial_position + additional_contracts)\n self.assertEqual(orders[1], Order(self.contract, stop_quantity, StopOrder(stop_price), TimeInForce.GTC))", "def test_fifo_with_nones(self):\n # Leave quant1, quant 2 with `in_date: False`\n # Leave quant 2 with no package, set quant1 and quant2 packages.\n self.quant1.write({\"package_id\": self.pack1.id})\n self.quant3.write({\"package_id\": self.pack3.id, \"in_date\": datetime.now()})\n\n # Reserve quantity - one apple\n reserved_quants = self.Quant._update_reserved_quantity(\n self.apple, self.test_stock_location_01, 1\n )\n reserved_quant = reserved_quants[0][0]\n\n self.assertFalse(reserved_quant.in_date)\n self.assertFalse(reserved_quant.package_id)\n self.assertEqual(reserved_quant, self.quant2)", "def action_stagger_purchase_delivery(self):\n for wizard in self:\n #On vérifie que la quantité entrée est inférieure à la quantité de la ligne \n #d'achat \n purchase_line = wizard.purchase_line_id\n price_unit = purchase_line.price_unit\n if wizard.quantity <= 0:\n raise except_orm(_(\"Error\"), _('You must enter a quantity superior to 0'))\n \n if wizard.quantity >= purchase_line.sec_uom_qty:\n raise except_orm(_(\"Error\"), _('You must enter a quantity inferior to the initial purchase '\n 'line quantity'))\n \n #On récupère les valeurs entrées dans le wizard\n values = {'sec_uom_qty': wizard.quantity,\n 'expected_date': wizard.new_date}\n new_line = purchase_line.copy(values)\n new_line._onchange_sec_uom_qty(with_warning=False)\n new_line._onchange_uom_qty()\n new_line._onchange_uoi_qty()\n new_line.write({'price_unit': price_unit})\n #On décrémente la ligne initiale de la quantité de la nouvelle ligne (car celle-ci respecte forcément\n #le multiple et le minimum\n purchase_line.write({'sec_uom_qty': purchase_line.sec_uom_qty - wizard.quantity})\n purchase_line._onchange_sec_uom_qty(with_warning=False)\n purchase_line._onchange_uom_qty()\n purchase_line._onchange_uoi_qty()\n purchase_line.write({'price_unit': price_unit})\n #On retourne l'achat\n if wizard.purchase_id:\n action_dict = get_form_view(self, 'purchase.purchase_order_see_form')\n if action_dict and action_dict.get('id') and action_dict.get('type'):\n action = self.env[action_dict['type']].browse(action_dict['id'])\n action_struc = action.read()\n action_struc[0]['res_id'] = wizard.purchase_id.id\n action_struc = action_struc[0]\n \n return action_struc\n else:\n return {'type': 'ir.actions.act_window_close'}", "def calc_qty(self, cr, uid, production_id, context=None):\n prod = self.pool.get('mrp.production').browse(cr, uid,production_id\n , context=context)\n done = 0.0\n for wo in prod.workcenter_lines:\n for mrej in wo.moves_rejection:\n done += mrej.s_rejected_qty or 0.0\n for move in prod.move_created_ids2:\n if 
move.product_id == prod.product_id:\n #ignore scrapped and extra consumed\n if (not move.scrapped) or (not move.extra_consumed):\n done += move.product_qty\n if (prod.product_qty - done) <= 0:\n raise osv.except_osv(_('Warning!'), _('Click on \"Force To Close\" button to generate remain scrap order.'))\n return (prod.product_qty - done) or prod.product_qty", "def __confirm_trade_lots(\n self, trade_lots: int, trade_price: float, trader_fund: int\n ):\n final_trade_lots = 0\n trade_volume = abs(trade_lots) * self.board_lot\n if trade_lots > 0:\n if self.__have_enough_money(trader_fund, trade_price, trade_volume):\n final_trade_lots = trade_lots\n else:\n final_trade_lots = 0\n elif trade_lots < 0:\n hold_volume = self.hold_volume\n if self.__have_enough_volume(hold_volume, trade_volume):\n final_trade_lots = trade_lots\n else:\n final_trade_lots = 0\n return final_trade_lots", "def test_fifo_without_nones(self):\n # Give each quant a package_id and in_date\n oldest_time = datetime.now() - timedelta(days=5)\n self.quant1.write({\"package_id\": self.pack1.id, \"in_date\": datetime.now()})\n self.quant2.write({\"package_id\": self.pack2.id, \"in_date\": oldest_time})\n self.quant3.write({\"package_id\": self.pack3.id, \"in_date\": oldest_time})\n\n # Reserve quantity - one apple\n reserved_quants = self.Quant._update_reserved_quantity(\n self.apple, self.test_stock_location_01, 1\n )\n reserved_quant = reserved_quants[0][0]\n\n # Should choose between quant2 and quant3 based on `in_date`.\n # Choose quant2 as it has a smaller package id.\n self.assertEqual(reserved_quant.in_date, oldest_time)\n self.assertEqual(reserved_quant.package_id, self.pack2)\n self.assertEqual(reserved_quant, self.quant2)", "def test_fixed_order_quantity(self):\n fixed_order_quantity = self._uncertain_demand.fixed_order_quantity\n avg_order = sum([int(item) for item in self._data_set.values()]) //len(self._data_set)\n cal_fixed_orders = lambda j, x, y, z: (2 * j * (x / (y * z))) ** 0.5\n test_fixed_orders = cal_fixed_orders(\n float(self._reorder_cost),\n float(avg_order),\n float(self._unit_cost),\n float(self._holding_cost_percentge)\n )\n\n self.assertEqual(int(fixed_order_quantity), int(test_fixed_orders))", "def test_for_non_splittable_hand(self):\n hand = self._hand\n cards = [BjCard('clubs', '7'), BjCard('diamonds', '4')]\n for card in cards:\n hand.add_card(card)\n self.assertEqual(hand.can_split, False)", "def parts_demand(request):\n critical_part = []\n quantity = None\n bom_name = None\n if request.method == 'POST':\n form = PartsDemandForm(request.POST)\n if form.is_valid():\n bom_name = form.cleaned_data['bom']\n quantity = int(form.cleaned_data['quantity'])\n warehouse = form.cleaned_data['warehouse']\n warehouse_obj = Warehouse.objects.get(warehouse_name=warehouse)\n stock = calculate_stock(warehouse_obj)\n parts = get_bom_parts(bom_name)\n print(stock)\n for part in parts:\n part_qty = float(part['Qty'])\n part_name = part['PartName']\n part_number = part['PartNumber']\n if stock.get(part_name):\n av_stock = stock.get(part_name)['total_usable_stock']\n # print(av_stock, quantity, part_qty, quantity * part_qty)\n else:\n av_stock = 0\n critical = int(av_stock) - int(quantity * part_qty)\n if critical <= 0:\n test = {\n \"critical_qty\": critical,\n \"part_number\": part_number,\n \"part_name\": part_name\n }\n critical_part.append(test)\n else:\n form = PartsDemandForm()\n context = {\n 'form': form,\n 'critical_part': critical_part,\n 'quantity': quantity,\n 'bom': bom_name,\n }\n\n return 
render(request, 'inventory/parts_demand.html', context)", "def _action_done(self):\n for ml in self:\n if ml.lot_name_in == ml.lot_name_repeat:\n if ml.lot_id and ml.lot_name_in and ml.product_id.tracking == 'serial':\n ml.lot_id.name = ml.lot_name_in\n ml.lot_id.lot_name_chasis = ml.lot_name\n ml.lot_id.edicion = ml.move_id.edicion\n ml.lot_id.colorinterno = ml.move_id.colorinterno.id\n ml.lot_id.colorexterno = ml.move_id.colorexterno.id\n ml.lot_id.n_llaves = ml.n_llaves\n ml.lot_id.cant_llaves = ml.cant_llaves\n ml.lot_id.n_caja = ml.n_caja\n ml.lot_id.mot_desarmada = ml.mot_desarmada\n ml.lot_name = ml.lot_name_in\n ml.lot_id.embarque = ml.picking_id.embarque\n for incidence in ml.incidencia:\n ml.lot_id.incidencia = [(4, incidence.id)]\n for incid in ml.lot_id.incidencia:\n incid.lot_id = ml.lot_id.id\n else:\n raise ValidationError(_(\n 'El numero de chasis \"%s\" no esta igual que el repetido') % ml.lot_name_in)\n\n super(StockMoveLine, self)._action_done()", "def test_initial_risk_position_sizer_with_cap(self):\n fraction_at_risk = 0.01 # will give leverage of 2, that will be capped to 1.5\n signal = Signal(self.ticker, Exposure.LONG, fraction_at_risk, self.last_price, self.timer.now())\n orders = self.initial_risk_position_sizer.size_signals([signal])\n\n self.assertEqual(len(orders), 2) # market order and stop order\n portfolio_value = self.initial_position / self.initial_allocation\n max_leverage = self.initial_risk_position_sizer.max_target_percentage\n target_quantity = float(np.floor(portfolio_value * max_leverage))\n additional_contracts = target_quantity - self.initial_position\n self.assertEqual(orders[0], Order(self.ticker, additional_contracts, MarketOrder(), TimeInForce.OPG))\n\n stop_price = self.last_price * (1 - fraction_at_risk)\n stop_quantity = -(self.initial_position + additional_contracts)\n self.assertEqual(orders[1], Order(self.ticker, stop_quantity, StopOrder(stop_price), TimeInForce.GTC))", "def test_01_base(self):\n # Create/validate PO\n order = self.create_and_validate_po()\n\n # Validate picking\n picking = order.picking_ids[0]\n picking.do_transfer()\n self.assertEqual(picking.state, 'done')", "def _buy(self, units=1):\n self.quantity -= units", "def test_initial_risk_position_sizer_with_cap(self):\n fraction_at_risk = 0.01 # will give leverage of 2, that will be capped to 1.5\n signal = Signal(self.ticker, Exposure.LONG, fraction_at_risk)\n orders = self.initial_risk_position_sizer.size_signals([signal])\n\n self.assertEqual(len(orders), 2) # market order and stop order\n portfolio_value = self.initial_position / self.initial_allocation\n max_leverage = self.initial_risk_position_sizer.max_target_percentage\n target_quantity = int(np.floor(portfolio_value * max_leverage))\n additional_contracts = target_quantity - self.initial_position\n self.assertEqual(orders[0], Order(self.contract, additional_contracts, MarketOrder(), TimeInForce.OPG))\n\n stop_price = self.last_price * (1 - fraction_at_risk)\n stop_quantity = -(self.initial_position + additional_contracts)\n self.assertEqual(orders[1], Order(self.contract, stop_quantity, StopOrder(stop_price), TimeInForce.GTC))", "def test_single_quant_assign_correct_quant(self):\n Quant = self.env[\"stock.quant\"]\n\n # Create a bunch of identical quants in the same location\n quants = Quant.browse()\n for i in range(5):\n quants |= self.create_quant(self.apple.id, self.test_stock_location_01.id, 10)\n self.assertEqual(len(quants), 5)\n\n quant = quants[2]\n pick = quant.create_picking(self.picking_type_pick, 
confirm=True, assign=True)\n self.assertEqual(pick.state, \"assigned\")\n self.assertEqual(quant.reserved_quantity, 10)", "def test_purchase_order_product_bundle(self):\n self.purchase = self.env['purchase.order'].with_user(self.purchase_user).create(self.order_vals)\n self.assertTrue(self.purchase, 'Purchase: no purchase order created')\n self.assertEqual(self.purchase.invoice_status, 'no', 'Purchase: PO invoice_status should be \"Not purchased\"')\n self.assertEqual(self.purchase.order_line.mapped('qty_received'), [0.0, 0.0], 'Purchase: no product should be received\"')\n self.assertEqual(self.purchase.order_line.mapped('qty_invoiced'), [0.0, 0.0], 'Purchase: no product should be invoiced\"')\n\n self.purchase.button_confirm()\n self.assertEqual(self.purchase.state, 'purchase', 'Purchase: PO state should be \"Purchase\"')\n self.assertEqual(self.purchase.invoice_status, 'to invoice', 'Purchase: PO invoice_status should be \"Waiting Invoices\"')\n\n self.assertEqual(self.purchase.picking_count, 1, 'Purchase: one picking should be created\"')\n self.picking = self.purchase.picking_ids[0]\n self.picking.move_line_ids.write({'qty_done': 1.0})\n self.picking.button_validate()\n\n product_bundle_line = self.purchase.order_line.filtered(lambda l: l.product_id == self.product_bundle_id)\n product_3_line = self.purchase.order_line.filtered(lambda l: l.product_id == self.product_3)\n self.bundle_order_qty = sum(product_bundle_line.mapped('product_uom_qty'))\n self.product_3_order_qty = sum(product_3_line.mapped('product_uom_qty'))\n self.total_bundle_order_qty = self.count_item_pack *self.bundle_order_qty\n\n self.assertEqual(self.bundle_order_qty, 1, 'Purchase: product bundle ordered quantity')\n self.assertEqual(self.total_bundle_order_qty, 3, 'Purchase: product bundle total quantity')\n self.assertEqual(self.product_3_order_qty, 1, 'Purchase: product Samsung S20 ordered quantity')\n self.assertEqual(product_bundle_line.mapped('qty_received'), [self.total_bundle_order_qty], 'Purchase: the product bundle should be received\"')\n self.assertEqual(product_3_line.mapped('qty_received'), [self.product_3_order_qty], 'Purchase: the product samsung S20 should be received\"')\n \n move_form = Form(self.env['account.move'].with_context(default_move_type='in_invoice'))\n move_form.partner_id = self.vendor\n move_form.purchase_id = self.purchase\n self.bill = move_form.save()\n\n # Control Policy products is On ordered quantities\n # self.bundle_order_qty = 1\n # self.product_3_order_qty = 1\n self.assertEqual(self.purchase.order_line.mapped('qty_invoiced'), [1, 1], 'Purchase: all products should be invoiced based on ordered quantity\"')", "def validate_product_quantity(item, qty):\n return True" ]
[ "0.61881334", "0.60192597", "0.57607996", "0.556063", "0.55559486", "0.553951", "0.5507649", "0.5501214", "0.53878224", "0.5373135", "0.5346838", "0.53159076", "0.53042346", "0.530181", "0.5292941", "0.5262657", "0.5252141", "0.5244603", "0.52374023", "0.5235112", "0.52324265", "0.5227628", "0.5203922", "0.5168775", "0.5154514", "0.5151504", "0.5150079", "0.51411873", "0.5138534", "0.51321965" ]
0.6650053
0
Make sure a kit is split in the corrects product_qty by components in case of a planned transfer.
def test_kit_planned_transfer(self): picking = self.env['stock.picking'].create({ 'location_id': self.test_supplier.id, 'location_dest_id': self.warehouse_1.wh_input_stock_loc_id.id, 'partner_id': self.test_partner.id, 'picking_type_id': self.env.ref('stock.picking_type_in').id, 'immediate_transfer': False, }) move_receipt_1 = self.env['stock.move'].create({ 'name': self.kit_parent.name, 'product_id': self.kit_parent.id, 'product_uom_qty': 3, 'product_uom': self.kit_parent.uom_id.id, 'picking_id': picking.id, 'picking_type_id': self.env.ref('stock.picking_type_in').id, 'location_id': self.test_supplier.id, 'location_dest_id': self.warehouse_1.wh_input_stock_loc_id.id, }) picking.action_confirm() # We check that the picking has the correct quantities after its move were splitted. self.assertEqual(len(picking.move_lines), 7) for move_line in picking.move_lines: self.assertEqual(move_line.product_qty, self.expected_quantities[move_line.product_id])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_overprocessed_subcontract_qty(self):\n overprocessed_moves = self.env['stock.move']\n for move in self:\n if not move.is_subcontract:\n continue\n # Extra quantity is allowed when components do not need to be register\n if not move._has_tracked_subcontract_components():\n continue\n rounding = move.product_uom.rounding\n if float_compare(move.quantity_done, move.move_orig_ids.production_id.qty_produced, precision_rounding=rounding) > 0:\n overprocessed_moves |= move\n if overprocessed_moves:\n raise UserError(_(\"\"\"\nYou have to use 'Records Components' button in order to register quantity for a\nsubcontracted product(s) with tracked component(s):\n %s.\nIf you want to process more than initially planned, you\ncan use the edit + unlock buttons in order to adapt the initial demand on the\noperations.\"\"\") % ('\\n'.join(overprocessed_moves.mapped('product_id.display_name'))))", "def set_so_pack_operation_lot(self, picking):\n StockProductionLot = self.env['stock.production.lot']\n sale_line_obj = self.env['sale.order.line']\n has_wrong_lots = False\n for del_move in picking.move_lines:\n del_move.move_line_ids.unlink()\n for move in picking.move_lines:\n picking_type = picking.picking_type_id\n # lots_necessary = True\n if picking_type:\n if not picking_type.use_existing_lots:\n picking_type.write({'use_existing_lots':True})\n # lots_necessary = picking_type and picking_type.use_existing_lots\n qty = 0\n qty_done = 0\n pack_lots = []\n pack_lot_id = []\n for ord_line in self.order_line:\n if ord_line.lot_id and ord_line.lot_id.product_id.id == move.product_id.id:\n pack_lot_id.append(ord_line.lot_id.id)\n # if pack_lot_names and lots_necessary:\n if pack_lot_id:\n for lot_id in list(set(pack_lot_id)):\n stock_production_lot = StockProductionLot.search([('id', '=', lot_id), ('product_id', '=', move.product_id.id)])\n sale_order_line = sale_line_obj.search([('lot_id', '=', lot_id),('order_id', '=', self.id), ('product_id', '=', move.product_id.id)])\n if stock_production_lot and sale_order_line:\n if stock_production_lot.product_id.tracking == 'lot':\n # if a lot nr is set through the frontend it will refer to the full quantity\n qty = sale_order_line[0].product_uom_qty\n else:\n qty = 1.0\n qty_done += qty\n pack_lots.append({'lot_id': stock_production_lot.id, 'qty': qty})\n else:\n has_wrong_lots = True\n # elif move.product_id.tracking == 'none' or not lots_necessary:\n elif move.product_id.tracking == 'none':\n qty_done = move.product_uom_qty\n else:\n has_wrong_lots = True\n for pack_lot in pack_lots:\n lot_id, qty = pack_lot['lot_id'], pack_lot['qty']\n self.env['stock.move.line'].create({\n 'move_id': move.id,\n 'product_id': move.product_id.id,\n 'product_uom_id': move.product_uom.id,\n 'qty_done': qty,\n 'location_id': move.location_id.id,\n 'location_dest_id': move.location_dest_id.id,\n 'lot_id': lot_id,\n })\n if not pack_lots:\n move.quantity_done = qty_done\n return has_wrong_lots", "def test_purchase_order_product_bundle(self):\n self.purchase = self.env['purchase.order'].with_user(self.purchase_user).create(self.order_vals)\n self.assertTrue(self.purchase, 'Purchase: no purchase order created')\n self.assertEqual(self.purchase.invoice_status, 'no', 'Purchase: PO invoice_status should be \"Not purchased\"')\n self.assertEqual(self.purchase.order_line.mapped('qty_received'), [0.0, 0.0], 'Purchase: no product should be received\"')\n self.assertEqual(self.purchase.order_line.mapped('qty_invoiced'), [0.0, 0.0], 'Purchase: no product should be invoiced\"')\n\n 
self.purchase.button_confirm()\n self.assertEqual(self.purchase.state, 'purchase', 'Purchase: PO state should be \"Purchase\"')\n self.assertEqual(self.purchase.invoice_status, 'to invoice', 'Purchase: PO invoice_status should be \"Waiting Invoices\"')\n\n self.assertEqual(self.purchase.picking_count, 1, 'Purchase: one picking should be created\"')\n self.picking = self.purchase.picking_ids[0]\n self.picking.move_line_ids.write({'qty_done': 1.0})\n self.picking.button_validate()\n\n product_bundle_line = self.purchase.order_line.filtered(lambda l: l.product_id == self.product_bundle_id)\n product_3_line = self.purchase.order_line.filtered(lambda l: l.product_id == self.product_3)\n self.bundle_order_qty = sum(product_bundle_line.mapped('product_uom_qty'))\n self.product_3_order_qty = sum(product_3_line.mapped('product_uom_qty'))\n self.total_bundle_order_qty = self.count_item_pack *self.bundle_order_qty\n\n self.assertEqual(self.bundle_order_qty, 1, 'Purchase: product bundle ordered quantity')\n self.assertEqual(self.total_bundle_order_qty, 3, 'Purchase: product bundle total quantity')\n self.assertEqual(self.product_3_order_qty, 1, 'Purchase: product Samsung S20 ordered quantity')\n self.assertEqual(product_bundle_line.mapped('qty_received'), [self.total_bundle_order_qty], 'Purchase: the product bundle should be received\"')\n self.assertEqual(product_3_line.mapped('qty_received'), [self.product_3_order_qty], 'Purchase: the product samsung S20 should be received\"')\n \n move_form = Form(self.env['account.move'].with_context(default_move_type='in_invoice'))\n move_form.partner_id = self.vendor\n move_form.purchase_id = self.purchase\n self.bill = move_form.save()\n\n # Control Policy products is On ordered quantities\n # self.bundle_order_qty = 1\n # self.product_3_order_qty = 1\n self.assertEqual(self.purchase.order_line.mapped('qty_invoiced'), [1, 1], 'Purchase: all products should be invoiced based on ordered quantity\"')", "def test_product_buy_more_then_have(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 50)\n self.assertFalse(result_buy)", "def validate_product_quantity(item, qty):\n return True", "def test_free_product(self):\n product = self.create_product(price=D('0.00'))\n\n res = self.do_get_basket()\n self.assertEqual(res.status_code, 200)\n basket_id = res.data['id']\n\n res = self.do_add_to_basket(product.id)\n self.assertEqual(res.status_code, 200)\n\n cs_url, cs_data = self.do_sign_auth_request(basket_id)\n\n self.assertEqual(cs_data['amount'], '0.00')\n\n res = self.do_cybersource_post(cs_url, cs_data)\n self.assertEqual(res.status_code, 302)\n self.check_finished_order(cs_data['reference_number'], product.id)", "def calc_qty(self, cr, uid, production_id, context=None):\n prod = self.pool.get('mrp.production').browse(cr, uid,production_id\n , context=context)\n done = 0.0\n for wo in prod.workcenter_lines:\n for mrej in wo.moves_rejection:\n done += mrej.s_rejected_qty or 0.0\n for move in prod.move_created_ids2:\n if move.product_id == prod.product_id:\n #ignore scrapped and extra consumed\n if (not move.scrapped) or (not move.extra_consumed):\n done += move.product_qty\n if (prod.product_qty - done) <= 0:\n raise osv.except_osv(_('Warning!'), _('Click on \"Force To Close\" button to generate remain scrap order.'))\n return (prod.product_qty - done) or prod.product_qty", "def parts_demand(request):\n critical_part = []\n quantity = None\n bom_name = None\n if request.method == 'POST':\n form = PartsDemandForm(request.POST)\n if form.is_valid():\n 
bom_name = form.cleaned_data['bom']\n quantity = int(form.cleaned_data['quantity'])\n warehouse = form.cleaned_data['warehouse']\n warehouse_obj = Warehouse.objects.get(warehouse_name=warehouse)\n stock = calculate_stock(warehouse_obj)\n parts = get_bom_parts(bom_name)\n print(stock)\n for part in parts:\n part_qty = float(part['Qty'])\n part_name = part['PartName']\n part_number = part['PartNumber']\n if stock.get(part_name):\n av_stock = stock.get(part_name)['total_usable_stock']\n # print(av_stock, quantity, part_qty, quantity * part_qty)\n else:\n av_stock = 0\n critical = int(av_stock) - int(quantity * part_qty)\n if critical <= 0:\n test = {\n \"critical_qty\": critical,\n \"part_number\": part_number,\n \"part_name\": part_name\n }\n critical_part.append(test)\n else:\n form = PartsDemandForm()\n context = {\n 'form': form,\n 'critical_part': critical_part,\n 'quantity': quantity,\n 'bom': bom_name,\n }\n\n return render(request, 'inventory/parts_demand.html', context)", "def test_kit_immediate_transfer(self):\n picking = self.env['stock.picking'].create({\n 'location_id': self.test_supplier.id,\n 'location_dest_id': self.warehouse_1.wh_input_stock_loc_id.id,\n 'partner_id': self.test_partner.id,\n 'picking_type_id': self.env.ref('stock.picking_type_in').id,\n 'immediate_transfer': True\n })\n move_receipt_1 = self.env['stock.move'].create({\n 'name': self.kit_parent.name,\n 'product_id': self.kit_parent.id,\n 'quantity_done': 3,\n 'product_uom': self.kit_parent.uom_id.id,\n 'picking_id': picking.id,\n 'picking_type_id': self.env.ref('stock.picking_type_in').id,\n 'location_id': self.test_supplier.id,\n 'location_dest_id': self.warehouse_1.wh_input_stock_loc_id.id,\n })\n picking.button_validate()\n\n # We check that the picking has the correct quantities after its move were splitted.\n self.assertEqual(len(picking.move_lines), 7)\n for move_line in picking.move_lines:\n self.assertEqual(move_line.quantity_done, self.expected_quantities[move_line.product_id])", "def test_excess_quantity(self):\n excess = self._uncertain_demand.excess_stock\n avg_order = sum([int(item) for item in self._data_set.values()]) //len(self._data_set)\n variance = [(item - avg_order) for item in self._data_set.values()]\n stdev = pow(sum([pow(j, 2) for j in variance]) / len(self._data_set), 0.5)\n cal_safety = lambda x, y, z: x * y * (z ** 0.5)\n safety_stock = cal_safety(float(self._z_value), float(stdev), float(self._lead_time))\n cal_reorder_level = lambda x, y, z: ((x ** 0.5) * y) + z\n reorder = cal_reorder_level(float(self._lead_time), avg_order, float(safety_stock))\n cal_excess = lambda x, y, z: round(x - (y + (y - z)), 0) if x > y + (y - z) else 0\n test_excess = cal_excess(self._quantity_on_hand, reorder, safety_stock)\n self.assertEqual(int(excess), int(test_excess))", "def test_product_bundle_price_calculation(self):\n template = self.product_apple_bundle\n template.write({'is_calpack_price': False})\n template.write({'is_calpack_price': True})\n self.assertEqual(template.list_price, self.total_price, 'Product: a product bundle canculation sale price')\n self.assertEqual(template.standard_price, self.total_cost, 'Product: a product bundle canculation product cost')", "def test_sell_ticket_valid_quantity(self, *_):\n # logout to invalidate any logged in session\n self.open(base_url + '/logout')\n # login a user\n self.open(base_url + '/login')\n # fill email and password\n self.type(\"#email\", \"[email protected]\")\n self.type(\"#password\", \"Test_frontend@\")\n # click enter button\n 
self.click('input[type=\"submit\"]')\n # open the /sell route\n self.open(base_url)\n # Enter an invalid ticket name\n self.type('#name_sell', \"ticketname\")\n self.type('#quantity_sell', \"-1\")\n self.type(\"#price_sell\", \"15\")\n self.type(\"#exp_date_sell\", \"20200921\")\n self.click('#submit-sell')\n # Assert that the valid error message is shown\n self.assert_text(\"Invalid quantity of tickets\", \"#message\")\n\n # logout to invalidate any logged in session\n self.open(base_url + '/logout')\n # login a user\n self.open(base_url + '/login')\n # fill email and password\n self.type(\"#email\", \"[email protected]\")\n self.type(\"#password\", \"Test_frontend@\")\n # click enter button\n self.click('input[type=\"submit\"]')\n # open the /sell route\n self.open(base_url)\n # Enter an invalid ticket name\n self.type('#name_sell', \"ticketname\")\n self.type('#quantity_sell', \"101\")\n self.type(\"#price_sell\", \"15\")\n self.type(\"#exp_date_sell\", \"20200921\")\n self.click('#submit-sell')\n # Assert that the valid error message is shown\n self.assert_text(\"Invalid quantity of tickets\", \"#message\")", "def test_bundle_is_product_pack(self):\n template = self.product_apple_bundle\n product_pack_ids = template.product_pack_ids\n self.assertTrue(template.is_pack, 'Product template is a bundle pack')\n self.assertTrue(len(product_pack_ids) != 0, 'Product: a product bundle should have product pack')\n self.assertEqual(len(product_pack_ids), 3, 'Product: a product bundle should have product pack')", "def test_product_buy_missing_goods(self):\n result_buy = self.info_list.product_buy(\"хлеб серый хлебозавод\", 3)\n self.assertFalse(result_buy)", "def test_stock_quantity_own_part(self):\n self.portfolio = './app/test/payloads/port_own_part.json'\n self.stock = ['ETH', 'BTC']\n self.stock_url = 'https://min-api.cryptocompare.com/data/pricemultifull?fsyms=ETH&tsyms=USD'\n\n expected_data = {'ETH': 1.108, 'BTC': 0}\n\n #Run stock_quantity on test data\n self.add_perf = PortfolioAddPerf(self.portfolio, self.stock, self.stock_url)\n return_data = self.add_perf.stock_quantity()\n\n self.assertEqual(expected_data, return_data)", "def change_product_qty(self):\n Inventory = self.env['stock.inventory']\n\n\n for wizard in self:\n product = wizard.product_id.with_context(location=wizard.location_id.id, lot_id=wizard.lot_id.id)\n line_data = wizard._action_start_line()\n\n\n if wizard.product_id.id and wizard.lot_id.id:\n inventory_filter = 'none'\n elif wizard.product_id.id:\n inventory_filter = 'product'\n else:\n inventory_filter = 'none'\n inventory = Inventory.create({\n 'name': _('INV: %s') % tools.ustr(wizard.product_id.display_name),\n 'filter': inventory_filter,\n 'product_id': wizard.product_id.id,\n 'location_id': wizard.location_id.id,\n 'lot_id': wizard.lot_id.id,\n 'line_ids': [(0, 0, line_data)],\n })\n inventory.action_done()\n return {'type': 'ir.actions.act_window_close'}", "def test_product_buy(self):\n result_buy = self.info_list.product_buy(\"соль 1 кг\", 5)\n self.assertEqual(result_buy, 175)", "def action_consume_custom(self, cr, uid, ids, product_qty, location_id=False, restrict_lot_id=False, restrict_partner_id=False,\r\n consumed_for=False, context=None):\r\n if context is None:\r\n context = {}\r\n res = []\r\n production_obj = self.pool.get('mrp.production.custom')\r\n\r\n if product_qty <= 0:\r\n raise osv.except_osv(_('Warning!'), _('Please provide proper quantity.'))\r\n #because of the action_confirm that can create extra moves in case of phantom bom, we need to 
make 2 loops\r\n ids2 = []\r\n for move in self.browse(cr, uid, ids, context=context):\r\n if move.state == 'draft':\r\n ids2.extend(self.action_confirm(cr, uid, [move.id], context=context))\r\n else:\r\n ids2.append(move.id)\r\n\r\n prod_orders = set()\r\n for move in self.browse(cr, uid, ids2, context=context):\r\n prod_orders.add(move.custom_production_id.id)\r\n print\"Total Qty>>>\",product_qty\r\n move_qty = product_qty\r\n if move_qty <= 0.00:\r\n raise osv.except_osv(_('Error!'), _('Cannot consume a move with negative or zero quantity.'))\r\n \r\n quantity_rest = move_qty - product_qty\r\n print\"Rest Qty>>>\",quantity_rest\r\n # Compare with numbers of move uom as we want to avoid a split with 0 qty\r\n quantity_rest_uom = move.product_uom_qty - self.pool.get(\"product.uom\")._compute_qty_obj(cr, uid, move.product_id.uom_id, product_qty, move.product_uom)\r\n if float_compare(quantity_rest_uom, 0, precision_rounding=move.product_uom.rounding) != 0:\r\n new_mov = self.split(cr, uid, move, quantity_rest, context=context)\r\n print\"New Move>>>\",new_mov\r\n res.append(new_mov)\r\n vals = {'restrict_lot_id': restrict_lot_id,\r\n 'restrict_partner_id': restrict_partner_id,\r\n 'consumed_for': consumed_for}\r\n if location_id:\r\n vals.update({'location_id': location_id})\r\n self.write(cr, uid, [move.id], vals, context=context)\r\n # Original moves will be the quantities consumed, so they need to be done\r\n self.action_done(cr, uid, ids2, context=context)\r\n if res:\r\n self.action_assign(cr, uid, res, context=context)\r\n if prod_orders:\r\n \r\n production_obj.action_in_production(cr, uid, list(prod_orders), context=None)\r\n #production_obj.signal_workflow(cr, uid, list(prod_orders), 'button_produce')\r\n return res", "def test_putaway_after_manufacturing_3(self):\n self.laptop.tracking = 'serial'\n mo_laptop = self.new_mo_laptop()\n serial = self.env['stock.production.lot'].create({'product_id': self.laptop.id, 'company_id': self.env.company.id})\n\n mo_form = Form(mo_laptop)\n mo_form.qty_producing = 1\n mo_form.lot_producing_id = serial\n mo_laptop = mo_form.save()\n mo_laptop.button_mark_done()\n\n # We check if the laptop go in the depot and not in the stock\n move = mo_laptop.move_finished_ids\n location_dest = move.move_line_ids.location_dest_id\n self.assertEqual(location_dest.id, self.depot_location.id)\n self.assertNotEqual(location_dest.id, self.stock_location.id)", "def create(self, request, *args, **kwargs):\n user = request.user\n\n # Copy the request data, to side-step \"mutability\" issues\n data = OrderedDict()\n # Update with cleaned input data\n data.update(self.clean_data(request.data))\n\n quantity = data.get('quantity', None)\n\n if quantity is None:\n raise ValidationError({\n 'quantity': _('Quantity is required'),\n })\n\n try:\n part = Part.objects.get(pk=data.get('part', None))\n except (ValueError, Part.DoesNotExist):\n raise ValidationError({\n 'part': _('Valid part must be supplied'),\n })\n\n # Set default location (if not provided)\n if 'location' not in data:\n location = part.get_default_location()\n\n if location:\n data['location'] = location.pk\n\n expiry_date = data.get('expiry_date', None)\n\n # An expiry date was *not* specified - try to infer it!\n if expiry_date is None and part.default_expiry > 0:\n data['expiry_date'] = datetime.now().date() + timedelta(days=part.default_expiry)\n\n # Attempt to extract serial numbers from submitted data\n serials = None\n\n # Check if a set of serial numbers was provided\n serial_numbers = 
data.get('serial_numbers', '')\n\n # Check if the supplier_part has a package size defined, which is not 1\n if 'supplier_part' in data and data['supplier_part'] is not None:\n try:\n supplier_part = SupplierPart.objects.get(pk=data.get('supplier_part', None))\n except (ValueError, SupplierPart.DoesNotExist):\n raise ValidationError({\n 'supplier_part': _('The given supplier part does not exist'),\n })\n\n if supplier_part.base_quantity() != 1:\n # Skip this check if pack size is 1 - makes no difference\n # use_pack_size = True -> Multiply quantity by pack size\n # use_pack_size = False -> Use quantity as is\n if 'use_pack_size' not in data:\n raise ValidationError({\n 'use_pack_size': _('The supplier part has a pack size defined, but flag use_pack_size not set'),\n })\n else:\n if bool(data.get('use_pack_size')):\n quantity = data['quantity'] = supplier_part.base_quantity(quantity)\n\n # Divide purchase price by pack size, to save correct price per stock item\n if data['purchase_price'] and supplier_part.pack_quantity_native:\n try:\n data['purchase_price'] = float(data['purchase_price']) / float(supplier_part.pack_quantity_native)\n except ValueError:\n pass\n\n # Now remove the flag from data, so that it doesn't interfere with saving\n # Do this regardless of results above\n if 'use_pack_size' in data:\n data.pop('use_pack_size')\n\n # Assign serial numbers for a trackable part\n if serial_numbers:\n\n if not part.trackable:\n raise ValidationError({\n 'serial_numbers': [_(\"Serial numbers cannot be supplied for a non-trackable part\")]\n })\n\n # If serial numbers are specified, check that they match!\n try:\n serials = extract_serial_numbers(\n serial_numbers,\n quantity,\n part.get_latest_serial_number()\n )\n\n # Determine if any of the specified serial numbers are invalid\n # Note \"invalid\" means either they already exist, or do not pass custom rules\n invalid = []\n errors = []\n\n for serial in serials:\n try:\n part.validate_serial_number(serial, raise_error=True)\n except DjangoValidationError as exc:\n # Catch raised error to extract specific error information\n invalid.append(serial)\n\n if exc.message not in errors:\n errors.append(exc.message)\n\n if len(errors) > 0:\n\n msg = _(\"The following serial numbers already exist or are invalid\")\n msg += \" : \"\n msg += \",\".join([str(e) for e in invalid])\n\n raise ValidationError({\n 'serial_numbers': errors + [msg]\n })\n\n except DjangoValidationError as e:\n raise ValidationError({\n 'quantity': e.messages,\n 'serial_numbers': e.messages,\n })\n\n if serials is not None:\n \"\"\"If the stock item is going to be serialized, set the quantity to 1.\"\"\"\n data['quantity'] = 1\n\n # De-serialize the provided data\n serializer = self.get_serializer(data=data)\n serializer.is_valid(raise_exception=True)\n\n with transaction.atomic():\n\n # Create an initial StockItem object\n item = serializer.save()\n\n if serials:\n # Assign the first serial number to the \"master\" item\n item.serial = serials[0]\n\n # Save the item (with user information)\n item.save(user=user)\n\n if serials:\n for serial in serials[1:]:\n\n # Create a duplicate stock item with the next serial number\n item.pk = None\n item.serial = serial\n\n item.save(user=user)\n\n response_data = {\n 'quantity': quantity,\n 'serial_numbers': serials,\n }\n\n else:\n response_data = serializer.data\n\n return Response(response_data, status=status.HTTP_201_CREATED, headers=self.get_success_headers(serializer.data))", "def ingredient_used(self, item, quantity):\n 
logger.info('ReleaseDiscard ingredient used initiated')\n try:\n quantity = Decimal(quantity).quantize(Decimal('0.11'))\n inventory_list = self.Inventory.search([('location', '=', self.kitchen.id)]\n , order=[('batch_number', 'ASC')])\n product = self.Product.search([('name', '=', item),\n ('description', '=', 'Stock'),\n ('type', '=', 'goods')])[-1]\n done = False\n today = date.today()\n for i in inventory_list:\n for j in i.lines:\n if j.product.template.name == item:\n expiry = j.expiry_date\n if expiry:\n if expiry >= today:\n if Decimal(j.quantity) >= Decimal(quantity):\n j.quantity = Decimal(j.quantity) - Decimal(quantity)\n self.move(from_location=self.kitchen, to_location=self.used, item=product,\n quantity=quantity,\n batch_number=i.batch_number)\n self.store_inventory(location=self.used, inventory_stock=j,\n quantity=quantity, batch=i.batch_number)\n j.save()\n self.check_and_delete(i)\n done = True\n else:\n quantity = Decimal(quantity) - Decimal(j.quantity)\n self.move(from_location=self.kitchen, to_location=self.used, item=product,\n quantity=j.quantity, batch_number=i.batch_number)\n self.store_inventory(location=self.used, inventory_stock=j,\n quantity=j.quantity, batch=i.batch_number)\n j.quantity = 0\n j.save()\n self.check_and_delete(i)\n # transaction.cursor.commit()\n i.save()\n if done:\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def _onchange_product_id_check_availability(self):\n res = super(SaleOrderLine, self)._onchange_product_id_check_availability()\n if res.get('warning'):\n product = self.product_id.with_context(warehouse=self.order_id.warehouse_id.id)\n self.product_uom_qty = product.virtual_available\n if self.product_uom_qty < 0:\n self.product_uom_qty = 0\n return res", "def action_stagger_purchase_delivery(self):\n for wizard in self:\n #On vérifie que la quantité entrée est inférieure à la quantité de la ligne \n #d'achat \n purchase_line = wizard.purchase_line_id\n price_unit = purchase_line.price_unit\n if wizard.quantity <= 0:\n raise except_orm(_(\"Error\"), _('You must enter a quantity superior to 0'))\n \n if wizard.quantity >= purchase_line.sec_uom_qty:\n raise except_orm(_(\"Error\"), _('You must enter a quantity inferior to the initial purchase '\n 'line quantity'))\n \n #On récupère les valeurs entrées dans le wizard\n values = {'sec_uom_qty': wizard.quantity,\n 'expected_date': wizard.new_date}\n new_line = purchase_line.copy(values)\n new_line._onchange_sec_uom_qty(with_warning=False)\n new_line._onchange_uom_qty()\n new_line._onchange_uoi_qty()\n new_line.write({'price_unit': price_unit})\n #On décrémente la ligne initiale de la quantité de la nouvelle ligne (car celle-ci respecte forcément\n #le multiple et le minimum\n purchase_line.write({'sec_uom_qty': purchase_line.sec_uom_qty - wizard.quantity})\n purchase_line._onchange_sec_uom_qty(with_warning=False)\n purchase_line._onchange_uom_qty()\n purchase_line._onchange_uoi_qty()\n purchase_line.write({'price_unit': price_unit})\n #On retourne l'achat\n if wizard.purchase_id:\n action_dict = get_form_view(self, 'purchase.purchase_order_see_form')\n if action_dict and action_dict.get('id') and action_dict.get('type'):\n action = self.env[action_dict['type']].browse(action_dict['id'])\n action_struc = action.read()\n action_struc[0]['res_id'] = wizard.purchase_id.id\n action_struc = action_struc[0]\n \n return action_struc\n else:\n return {'type': 'ir.actions.act_window_close'}", "def 
test_06_replacement_product_wizard(self):\n # Create a purchase order with two lines.\n order = self.create_po()\n sellable_product = self.product_obj.browse(self.sellable_product)\n draft_product = self.product_obj.browse(self.draft_product)\n self.create_pol(order, sellable_product)\n self.create_pol(order, draft_product)\n self.assertNotIn('obsolete',\n order.order_line.mapped('product_id.state2'))\n\n # Update sellable product to obsolete\n # NOTE: This check check the write() method of the product.product\n # record.\n self.assertIn(sellable_product, order.order_line.mapped('product_id'))\n self.assertEqual(sellable_product.state2, 'sellable')\n sellable_product.state2 = 'obsolete'\n self.assertEqual(sellable_product.state2, 'obsolete')\n\n # Check that the purchase order line now have a obsolete line.\n obsolete_order_line = order.order_line.filtered(\n lambda line: line.product_id.state2 == 'obsolete')\n self.assertTrue(obsolete_order_line)\n self.assertEqual(obsolete_order_line.product_id, sellable_product)\n\n # Simulate click on the \"Check Discontinued Products\" button to run the\n # replacemenet product wizard.\n wiz = self.wiz_obj.with_context({\n 'active_id': order.id,\n 'active_ids': [order.id],\n 'active_model': 'purchase.order',\n }).create({})\n\n # Chech that the wizard auto create correctly the replacement lines.\n # The replacement line must be linked/generate to the obsolete purchase\n # order line.\n self.assertTrue(wiz.lines)\n self.assertEqual(len(wiz.lines), 1)\n self.assertEqual(obsolete_order_line, wiz.lines.mapped('line_id'))\n\n # TODO add a case to try to add a new replacement line manually. this\n # must be fail.\n\n # Try to add an obsolete replacement product in the replacement line.\n # This will raise an exception becuase only not obsolete products can\n # be used as a valid replacement.\n wiz_line = wiz.lines[0]\n with self.assertRaises(exceptions.ValidationError):\n wiz_line.replacement_product_id = self.obsolete_replacement\n\n # Add a sellable replacement product in the replacement line.\n wiz_line.replacement_product_id = self.sellable_replacement\n self.assertEqual(wiz_line.replacement_product_id.id,\n self.sellable_replacement)\n\n # Make the replacement in the purchase order by clicking the button\n # \"Replace\" in the replacement wizard and check that the changes were\n # applied to the purchase order line.\n wiz.replacement()\n self.assertEqual(obsolete_order_line.product_id,\n wiz_line.replacement_product_id)\n self.assertEqual(obsolete_order_line.discontinued_product_id,\n wiz_line.discontinued_product_id)", "def _update_reserved_quantity(self, product_id, location_id, quantity, lot_id=None, package_id=None, owner_id=None,\n strict=False):\n self = self.sudo()\n rounding = product_id.uom_id.rounding\n quants = self._gather(product_id, location_id, lot_id=lot_id, package_id=package_id, owner_id=owner_id,\n strict=strict)\n reserved_quants = []\n\n if float_compare(quantity, 0, precision_rounding=rounding) > 0:\n # if we want to reserve\n available_quantity = self._get_available_quantity(product_id, location_id, lot_id=lot_id,\n package_id=package_id, owner_id=owner_id, strict=strict)\n if float_compare(quantity, available_quantity, precision_rounding=rounding) > 0:\n raise UserError(_('It is not possible to reserve more products of %s than you have in stock.',\n product_id.display_name))\n elif float_compare(quantity, 0, precision_rounding=rounding) < 0:\n # if we want to unreserve\n available_quantity = 
sum(quants.mapped('reserved_quantity'))\n # if float_compare(abs(quantity), available_quantity, precision_rounding=rounding) > 0:\n # raise UserError(_('It is not possible to unreserve more products of %s than you have in stock.',\n # product_id.display_name))\n else:\n return reserved_quants\n\n for quant in quants:\n if float_compare(quantity, 0, precision_rounding=rounding) > 0:\n max_quantity_on_quant = quant.quantity - quant.reserved_quantity\n if float_compare(max_quantity_on_quant, 0, precision_rounding=rounding) <= 0:\n continue\n max_quantity_on_quant = min(max_quantity_on_quant, quantity)\n quant.reserved_quantity += max_quantity_on_quant\n reserved_quants.append((quant, max_quantity_on_quant))\n quantity -= max_quantity_on_quant\n available_quantity -= max_quantity_on_quant\n else:\n max_quantity_on_quant = min(quant.reserved_quantity, abs(quantity))\n quant.reserved_quantity -= max_quantity_on_quant\n reserved_quants.append((quant, -max_quantity_on_quant))\n quantity += max_quantity_on_quant\n available_quantity += max_quantity_on_quant\n\n if float_is_zero(quantity, precision_rounding=rounding) or float_is_zero(available_quantity,\n precision_rounding=rounding):\n break\n return reserved_quants", "def test_multiple_creates_do_not_increase_products(self):\n for i in xrange(0, 10):\n modified_po = copy.deepcopy(base_purchase_order)\n self.assertEqual(Supply.objects.get(pk=1).quantity, 10)\n \n resp = self.client.post('/api/v1/purchase-order/', format='json', data=modified_po)\n \n self.assertEqual(resp.status_code, 201, msg=resp)\n \n po_data = resp.data\n self.assertEqual(po_data['status'], 'AWAITING APPROVAL')\n\n item1 = po_data['items'][0]\n #self.assertEqual(item1['supply']['id'], 1)\n self.assertEqual(item1['status'], u'Ordered')\n\n item2 = po_data['items'][1]\n #self.assertEqual(item1['supply']['id'], 2)\n self.assertEqual(item1['status'], u'Ordered')\n \n #Test database values\n po = PurchaseOrder.objects.get(pk=resp.data['id'])\n self.assertEqual(po.status, 'AWAITING APPROVAL')\n for item in po.items.all():\n self.assertEqual(item.status, u\"Ordered\")\n \n supplier = Supplier.objects.get(pk=1)\n\n supply = Supply.objects.get(pk=1)\n self.assertEqual(supply.quantity, 10)\n self.assertEqual(supply.products.filter(supplier=supplier).count(), 1)\n\n supply = Supply.objects.get(pk=2)\n self.assertEqual(supply.quantity, 10)\n self.assertEqual(supply.products.filter(supplier=supplier).count(), 1)", "def _prepare_pack_ops(self, quants, forced_qties):\n valid_quants = quants.filtered(lambda quant: quant.qty > 0)\n _Mapping = namedtuple('Mapping', ('product', 'package', 'owner', 'location', 'location_dst_id','move_id'))\n all_products = valid_quants.mapped('product_id') | self.env['product.product'].browse(set(m.product_id.id for m,q in forced_qties)) | self.move_lines.mapped('product_id')\n computed_putaway_locations = dict(\n (product, self.location_dest_id.get_putaway_strategy(product) or self.location_dest_id.id) for product in all_products)\n product_to_uom = dict((product.id, product.uom_id) for product in all_products)\n picking_moves = self.move_lines.filtered(lambda move: move.state not in ('done', 'cancel'))\n for move in picking_moves:\n # If we encounter an UoM that is smaller than the default UoM or the one already chosen, use the new one instead.\n if move.product_uom != product_to_uom[move.product_id.id] and move.product_uom.factor > product_to_uom[move.product_id.id].factor:\n product_to_uom[move.product_id.id] = move.product_uom\n if 
len(picking_moves.mapped('location_id')) > 1:\n raise UserError(_('The source location must be the same for all the moves of the picking.'))\n if len(picking_moves.mapped('location_dest_id')) > 1:\n raise UserError(_('The destination location must be the same for all the moves of the picking.'))\n pack_operation_values = []\n # find the packages we can move as a whole, create pack operations and mark related quants as done\n top_lvl_packages = valid_quants._get_top_level_packages(computed_putaway_locations)\n for pack in top_lvl_packages:\n pack_quants = pack.get_content()\n pack_operation_values.append({\n 'picking_id': self.id,\n 'package_id': pack.id,\n 'product_qty': 1.0,\n 'location_id': pack.location_id.id,\n 'location_dest_id': computed_putaway_locations[pack_quants[0].product_id],\n 'owner_id': pack.owner_id.id,\n })\n valid_quants -= pack_quants\n # Go through all remaining reserved quants and group by product, package, owner, source location and dest location\n # Lots will go into pack operation lot object\n qtys_grouped = {}\n lots_grouped = {}\n for quant in valid_quants:\n key = _Mapping(quant.product_id, quant.package_id, quant.owner_id, quant.location_id, computed_putaway_locations[quant.product_id], quant.reservation_id)\n qtys_grouped.setdefault(key, 0.0)\n qtys_grouped[key] += quant.qty\n if quant.product_id.tracking != 'none' and quant.lot_id:\n lots_grouped.setdefault(key, dict()).setdefault(quant.lot_id.id, 0.0)\n lots_grouped[key][quant.lot_id.id] += quant.qty\n # Do the same for the forced quantities (in cases of force_assign or incomming shipment for example)\n for move_f, qty in forced_qties:\n if qty <= 0.0:\n continue\n key = _Mapping(move_f.product_id, self.env['stock.quant.package'], self.owner_id, self.location_id, computed_putaway_locations[move_f.product_id], move_f)\n qtys_grouped.setdefault(key, 0.0)\n qtys_grouped[key] += qty\n # Create the necessary operations for the grouped quants and remaining qtys\n Uom = self.env['product.uom']\n move_id_to_vals = {} # use it to create operations using the same order as the picking stock moves\n for mapping, qty in qtys_grouped.items():\n uom = product_to_uom[mapping.product.id]\n val_dict = {\n 'picking_id': self.id,\n 'product_qty': mapping.product.uom_id._compute_quantity(qty, uom),\n 'product_id': mapping.product.id,\n 'package_id': mapping.package.id,\n 'owner_id': mapping.owner.id,\n 'location_id': mapping.location.id,\n 'location_dest_id': mapping.location_dst_id,\n 'product_uom_id': uom.id,\n 'pack_lot_ids': [\n (0, 0, {'lot_id': lot, 'qty': 0.0, 'qty_todo': lots_grouped[mapping][lot]})\n for lot in lots_grouped.get(mapping, {}).keys()],\n }\n move_id_to_vals.setdefault(mapping.move_id.id, list()).append(val_dict)\n for move in self.move_lines.filtered(lambda move: move.state not in ('done', 'cancel')):\n values = move_id_to_vals.pop(move.id, [])\n pack_operation_values += values\n return pack_operation_values", "def test_product_not_available_by_stock(self):\n product = ProductFactory(stock_amount=2)\n\n for i in range(2):\n opr = OrderProductRelationFactory(product=product)\n order = opr.order\n order.paid = True\n order.save()\n\n self.assertEqual(product.left_in_stock, 0)\n self.assertFalse(product.is_stock_available)\n self.assertFalse(product.is_available())", "def _get_new_qty_for_none_goodies_line(self, cr, uid, qty, product_id, order_id, context=None):\n goodies_line_ids = self.search(cr, uid, [\n ['order_id', '=', order_id],\n ['product_id', '=', product_id],\n ['goodie_for_line_id', '!=', False]\n 
], context=context)\n for goodie_line in self.browse(cr, uid, goodies_line_ids, context=context):\n qty -= goodie_line.product_qty\n if qty < 0:\n qty = 0\n return qty", "def test_team_builder_config_product_size_materials_post(self):\n pass" ]
[ "0.62997496", "0.6127928", "0.6034478", "0.6018836", "0.59107655", "0.5806491", "0.5761691", "0.5730393", "0.5726534", "0.57250893", "0.5659513", "0.56430745", "0.56031597", "0.5599683", "0.55872136", "0.5580281", "0.55272794", "0.55234873", "0.55033195", "0.54832494", "0.5462037", "0.5456558", "0.5439621", "0.54112643", "0.5378628", "0.53774136", "0.5376175", "0.5348306", "0.53454494", "0.53288656" ]
0.6308212
0
ability that deals damage to the target
def ability_1(self,target): damage = (self.get_strength()+2) target.receive_damage(damage)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ability_3(self,target):\r\n damage = (self.get_dexterity()+self.get_strength())\r\n target.receive_damage(damage)", "def ability_4(self,target):\r\n damage = (self.get_strength()*3)\r\n target.receive_damage(damage)", "def ability_2(self,target):\r\n damage1 = (self.get_lvl()+self.get_strength())\r\n target.receive_damage(damage1)", "def _attack(self,target):\r\n damage = self.get_strength() * self.get_lvl()\r\n target.receive_damage(damage)", "def attack(self, target, friendly):\n self.on_attack(target, friendly)\n dmg = self.on_deal_dmg(target, friendly)\n self.death_remove(friendly)\n # May remove other minions in special cases\n # ... \n\n return dmg", "def deal_damage(self, target):\n if hasattr(target, \"hp\"):\n dmg = random.randrange(self.atk + 1)\n target.take_damage(dmg)\n return dmg", "def on_deal_dmg(self, target, friendly):\n if self.hurt:\n self.dfs -= target.atk\n if self.dfs <= 0 or target.poison:\n self.dead = True\n if target.hurt:\n target.dfs -= self.atk\n if target.dfs <= 0 or self.poison:\n target.dead = True\n\n # some special events may take place here\n # ... \n return self.atk", "def on_attack(self, target, friendly):\n # Get buff from Dread Admiral Eliza\n if self.race == 'pirate' or self.race == 'all':\n eliza_buff_atk, eliza_buff_dfs = friendly.friendly_eliza_buff\n for each in friendly.minions:\n each.get_buff(eliza_buff_atk, eliza_buff_dfs)\n\n # If divine shield, not getting hurt\n if not self.divine_shield:\n self.hurt = True\n if not target.divine_shield:\n target.hurt = True", "def damage(self, dmg_value):\n if self.can_take_damage():\n # here we'll add if it's affected by negative buffs\n self.health -= dmg_value", "def deal_dmg(self):\n return self.damage", "def can_take_damage(self):\n result = True\n if self.side_effects[\"shield\"] > 0:\n result = False\n return result", "def damageSubtractor(self, damage, target, caller):\n # Build the target av objects\n target_shield_value = target.db.shield_value # Applied conditionally\n target_armor = target.db.armor\n target_tough = target.db.tough\n target_armor_specialist = target.db.armor_specialist\n\n # Apply damage in order\n if target_shield_value:\n # Get value of shield damage to check if it's under 0. 
Need to pass\n # this on to armor\n shield_damage = target_shield_value - damage\n if shield_damage < 0:\n # Check if damage would make shield go below 0\n damage = abs(shield_damage)\n # Set shield_value to 0\n target.db.shield_value = 0\n # Recalc and set av with new shield value\n else:\n target.db.shield_value = shield_damage\n damage = 0\n\n if target_armor_specialist and damage:\n # Get value of damage\n armor_specialist_damage = target_armor_specialist - damage\n if armor_specialist_damage < 0:\n damage = abs(armor_specialist_damage)\n target.db.armor_specialist = 0\n else:\n target.db.armor_specialist = armor_specialist_damage\n damage = 0\n\n if target_armor and damage:\n # Get value of damage\n armor_damage = target_armor - damage\n if armor_damage < 0:\n damage = abs(armor_damage)\n target.db.armor = 0\n else:\n target.db.armor = armor_damage\n damage = 0\n\n if target_tough and damage:\n tough_damage = target_tough - damage\n if tough_damage < 0:\n damage = abs(tough_damage)\n target.db.tough = 0\n else:\n target.db.tough = tough_damage\n damage = 0\n else:\n self.deathSubtractor(damage, target, caller)\n\n new_av = self.updateArmorValue(target.db.shield_value, target.db.armor, target.db.tough, target.db.armor_specialist)\n\n return new_av", "def apply_ability_effects(ability: dict, target: Player, self: Player) -> None:\n for effect in ability[\"effects\"]:\n if effect[\"target\"] == \"target\":\n getattr(combat_effects, \"inflict_\" + effect[\"effect\"])(\n value=effect[\"value\"], player=target\n )\n elif effect[\"target\"] == \"self\":\n getattr(combat_effects, \"inflict_\" + effect[\"effect\"])(\n value=effect[\"value\"], player=self\n )", "def use(target, name):\n out = target.damage() + \"\\n\"\n return out + \"You swing the \" + name + \" at \" + target.name", "def ship_took_damage(self, damage: Damage):\n pass", "def take_damage(self, dmg, dtype = 1):\n self.game.hit_sound.play()\n \n #DR% = 1 - (100 / x). 
\n damageMultiplier = 100.0 / float(self.defense)\n #Apply defense buffs/debuffs\n #calculate damage:\n dmg -= self.absorbtion\n dmg *= damageMultiplier\n #apply damage\n self.hp[0] -= dmg", "def doDamage(self, owner, messages):\n self.damageDelegate.doDamage(owner, owner, None)\n messages.append(self.hurtItself)", "def passive(self,target):\r\n target.shield_increase(200,buff_type = \"temp\")", "def passive(self,target):\r\n target.dmg_increase(self.__dmg_increase,buff_type = \"permanent\")", "def Hit(self, damage):\n self.health -= damage", "def CombatAttack(self):\n self.attacker.Attack(target=self.defender)", "def attack(self, attack_name, pokemon_to_hit):\n attack = next(attack for attack in self.attacks if attack.name == attack_name)\n if pokemon_to_hit.weakness == self.type:\n pokemon_to_hit.hp -= attack.damage * 2\n else:\n pokemon_to_hit.hp -= attack.damage", "def deal_damage(self, damage):\n # Another cool trick\n self.current_health = max(\n 0,\n self.current_health-damage\n )", "def __attack(self, target):\n attack_difference = (Warrior.attack(self, target))\n if attack_difference > 5:\n print(\"Second attack with ANGRY!\")\n Warrior.attack(self, target)\n return None", "def coreDamage(self, user, target):\n scale = self.getScale()\n \n damage = super(DamageScaleDelegate, self).coreDamage(user, target)-2\n damage = damage*scale\n return damage + 2", "def damage(self):\n if not self.damage_mode and not self.attack_mode and not self.death_mode:\n self.damage_mode = True\n self.cut_frame_update = 0", "def attack(self, enemy):\n damage_dealt = self.damage - enemy.armor\n if damage_dealt < 1:\n damage_dealt = 1\n enemy.hit_points -= damage_dealt", "async def use(self):\n\n # init\n await self.caster.posture.change_posture(\"attacking\")\n\n move = Move_displayer()\n calculator = Damage_calculator(self.caster, self.target)\n checker = Effect_checker(self.target)\n\n # get the damage\n damage = randint(self.caster.damage.physical_min, self.caster.damage.physical_max)\n damage = await calculator.physical_damage(\n damage,\n critable = True,\n dodgable = True\n )\n\n # define move info\n _move = await move.get_new_move()\n\n _move[\"name\"] = self.name\n _move[\"icon\"] = self.icon\n _move[\"damage\"] = damage[\"calculated\"]\n _move[\"critical\"] = damage[\"critical\"]\n _move[\"dodge\"] = damage[\"dodge\"]\n _move[\"physical\"] = True\n\n _move = await move.offensive_move(_move)\n\n # inflict damage\n await self.target.receive_damage(damage[\"calculated\"])\n\n return(_move)", "def take_damage(self, damage):\n if self.hp - damage <= 0:\n self.hp = 0\n self.die()\n else:\n self.hp -= damage", "def effectOnMiss(self, user, target):\n return self.stopCharge(user)" ]
[ "0.7999557", "0.7960427", "0.79067296", "0.7736732", "0.7559663", "0.75042886", "0.73485667", "0.69856614", "0.6954562", "0.68673724", "0.6778077", "0.67357415", "0.66850173", "0.6644636", "0.6638605", "0.662029", "0.6539297", "0.65386426", "0.65321666", "0.65184796", "0.65008146", "0.6496057", "0.6479794", "0.6462085", "0.6440297", "0.64162344", "0.6388051", "0.63838446", "0.6383606", "0.6367558" ]
0.81057763
0
ability that deals damage to the target
def ability_3(self,target): damage = (self.get_dexterity()+self.get_strength()) target.receive_damage(damage)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ability_1(self,target):\r\n damage = (self.get_strength()+2)\r\n target.receive_damage(damage)", "def ability_4(self,target):\r\n damage = (self.get_strength()*3)\r\n target.receive_damage(damage)", "def ability_2(self,target):\r\n damage1 = (self.get_lvl()+self.get_strength())\r\n target.receive_damage(damage1)", "def _attack(self,target):\r\n damage = self.get_strength() * self.get_lvl()\r\n target.receive_damage(damage)", "def attack(self, target, friendly):\n self.on_attack(target, friendly)\n dmg = self.on_deal_dmg(target, friendly)\n self.death_remove(friendly)\n # May remove other minions in special cases\n # ... \n\n return dmg", "def deal_damage(self, target):\n if hasattr(target, \"hp\"):\n dmg = random.randrange(self.atk + 1)\n target.take_damage(dmg)\n return dmg", "def on_deal_dmg(self, target, friendly):\n if self.hurt:\n self.dfs -= target.atk\n if self.dfs <= 0 or target.poison:\n self.dead = True\n if target.hurt:\n target.dfs -= self.atk\n if target.dfs <= 0 or self.poison:\n target.dead = True\n\n # some special events may take place here\n # ... \n return self.atk", "def on_attack(self, target, friendly):\n # Get buff from Dread Admiral Eliza\n if self.race == 'pirate' or self.race == 'all':\n eliza_buff_atk, eliza_buff_dfs = friendly.friendly_eliza_buff\n for each in friendly.minions:\n each.get_buff(eliza_buff_atk, eliza_buff_dfs)\n\n # If divine shield, not getting hurt\n if not self.divine_shield:\n self.hurt = True\n if not target.divine_shield:\n target.hurt = True", "def damage(self, dmg_value):\n if self.can_take_damage():\n # here we'll add if it's affected by negative buffs\n self.health -= dmg_value", "def deal_dmg(self):\n return self.damage", "def can_take_damage(self):\n result = True\n if self.side_effects[\"shield\"] > 0:\n result = False\n return result", "def damageSubtractor(self, damage, target, caller):\n # Build the target av objects\n target_shield_value = target.db.shield_value # Applied conditionally\n target_armor = target.db.armor\n target_tough = target.db.tough\n target_armor_specialist = target.db.armor_specialist\n\n # Apply damage in order\n if target_shield_value:\n # Get value of shield damage to check if it's under 0. 
Need to pass\n # this on to armor\n shield_damage = target_shield_value - damage\n if shield_damage < 0:\n # Check if damage would make shield go below 0\n damage = abs(shield_damage)\n # Set shield_value to 0\n target.db.shield_value = 0\n # Recalc and set av with new shield value\n else:\n target.db.shield_value = shield_damage\n damage = 0\n\n if target_armor_specialist and damage:\n # Get value of damage\n armor_specialist_damage = target_armor_specialist - damage\n if armor_specialist_damage < 0:\n damage = abs(armor_specialist_damage)\n target.db.armor_specialist = 0\n else:\n target.db.armor_specialist = armor_specialist_damage\n damage = 0\n\n if target_armor and damage:\n # Get value of damage\n armor_damage = target_armor - damage\n if armor_damage < 0:\n damage = abs(armor_damage)\n target.db.armor = 0\n else:\n target.db.armor = armor_damage\n damage = 0\n\n if target_tough and damage:\n tough_damage = target_tough - damage\n if tough_damage < 0:\n damage = abs(tough_damage)\n target.db.tough = 0\n else:\n target.db.tough = tough_damage\n damage = 0\n else:\n self.deathSubtractor(damage, target, caller)\n\n new_av = self.updateArmorValue(target.db.shield_value, target.db.armor, target.db.tough, target.db.armor_specialist)\n\n return new_av", "def apply_ability_effects(ability: dict, target: Player, self: Player) -> None:\n for effect in ability[\"effects\"]:\n if effect[\"target\"] == \"target\":\n getattr(combat_effects, \"inflict_\" + effect[\"effect\"])(\n value=effect[\"value\"], player=target\n )\n elif effect[\"target\"] == \"self\":\n getattr(combat_effects, \"inflict_\" + effect[\"effect\"])(\n value=effect[\"value\"], player=self\n )", "def use(target, name):\n out = target.damage() + \"\\n\"\n return out + \"You swing the \" + name + \" at \" + target.name", "def ship_took_damage(self, damage: Damage):\n pass", "def take_damage(self, dmg, dtype = 1):\n self.game.hit_sound.play()\n \n #DR% = 1 - (100 / x). 
\n damageMultiplier = 100.0 / float(self.defense)\n #Apply defense buffs/debuffs\n #calculate damage:\n dmg -= self.absorbtion\n dmg *= damageMultiplier\n #apply damage\n self.hp[0] -= dmg", "def doDamage(self, owner, messages):\n self.damageDelegate.doDamage(owner, owner, None)\n messages.append(self.hurtItself)", "def passive(self,target):\r\n target.shield_increase(200,buff_type = \"temp\")", "def passive(self,target):\r\n target.dmg_increase(self.__dmg_increase,buff_type = \"permanent\")", "def Hit(self, damage):\n self.health -= damage", "def CombatAttack(self):\n self.attacker.Attack(target=self.defender)", "def attack(self, attack_name, pokemon_to_hit):\n attack = next(attack for attack in self.attacks if attack.name == attack_name)\n if pokemon_to_hit.weakness == self.type:\n pokemon_to_hit.hp -= attack.damage * 2\n else:\n pokemon_to_hit.hp -= attack.damage", "def deal_damage(self, damage):\n # Another cool trick\n self.current_health = max(\n 0,\n self.current_health-damage\n )", "def __attack(self, target):\n attack_difference = (Warrior.attack(self, target))\n if attack_difference > 5:\n print(\"Second attack with ANGRY!\")\n Warrior.attack(self, target)\n return None", "def coreDamage(self, user, target):\n scale = self.getScale()\n \n damage = super(DamageScaleDelegate, self).coreDamage(user, target)-2\n damage = damage*scale\n return damage + 2", "def damage(self):\n if not self.damage_mode and not self.attack_mode and not self.death_mode:\n self.damage_mode = True\n self.cut_frame_update = 0", "def attack(self, enemy):\n damage_dealt = self.damage - enemy.armor\n if damage_dealt < 1:\n damage_dealt = 1\n enemy.hit_points -= damage_dealt", "async def use(self):\n\n # init\n await self.caster.posture.change_posture(\"attacking\")\n\n move = Move_displayer()\n calculator = Damage_calculator(self.caster, self.target)\n checker = Effect_checker(self.target)\n\n # get the damage\n damage = randint(self.caster.damage.physical_min, self.caster.damage.physical_max)\n damage = await calculator.physical_damage(\n damage,\n critable = True,\n dodgable = True\n )\n\n # define move info\n _move = await move.get_new_move()\n\n _move[\"name\"] = self.name\n _move[\"icon\"] = self.icon\n _move[\"damage\"] = damage[\"calculated\"]\n _move[\"critical\"] = damage[\"critical\"]\n _move[\"dodge\"] = damage[\"dodge\"]\n _move[\"physical\"] = True\n\n _move = await move.offensive_move(_move)\n\n # inflict damage\n await self.target.receive_damage(damage[\"calculated\"])\n\n return(_move)", "def take_damage(self, damage):\n if self.hp - damage <= 0:\n self.hp = 0\n self.die()\n else:\n self.hp -= damage", "def effectOnMiss(self, user, target):\n return self.stopCharge(user)" ]
[ "0.81057763", "0.7960427", "0.79067296", "0.7736732", "0.7559663", "0.75042886", "0.73485667", "0.69856614", "0.6954562", "0.68673724", "0.6778077", "0.67357415", "0.66850173", "0.6644636", "0.6638605", "0.662029", "0.6539297", "0.65386426", "0.65321666", "0.65184796", "0.65008146", "0.6496057", "0.6479794", "0.6462085", "0.6440297", "0.64162344", "0.6388051", "0.63838446", "0.6383606", "0.6367558" ]
0.7999557
1
Return the path of the ocamlmerlin binary."
def merlin_bin(): user_settings = sublime.load_settings("Merlin.sublime-settings") merlin_path = user_settings.get('ocamlmerlin_path') if merlin_path: return merlin_path # For Mac OS X, add the path for homebrew if "/usr/local/bin" not in os.environ['PATH'].split(os.pathsep): os.environ['PATH'] += os.pathsep + "/usr/local/bin" opam_process = subprocess.Popen('opam config var bin', stdout=subprocess.PIPE, shell=True) opam_bin_path = opam_process.stdout.read().decode('utf-8').rstrip() + '/ocamlmerlin' if os.path.isfile(opam_bin_path) and os.access(opam_bin_path, os.X_OK): return opam_bin_path else: return 'ocamlmerlin'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_bin_dir():\n return os.path.abspath(os.path.join(get_root_dir(), 'bin/'))", "def dir_bin():\n return abspath('bin')", "def binary_location(cmd, USE_PATH=False):\n return os.path.join(BIN_PREFIX, cmd)", "def get_golem_path():\r\n return os.path.abspath(os.path.join(os.path.dirname(__file__), \"../\"))", "def binpath(self):\n return self._query_config()['binpath']", "def binpath(self):\n return self.__bin", "def get_reference_binary():\n return \"./Binary/linux-x64/astcenc\"", "def bin_path(self) -> Path:\n return self._root_path / \"stefan-on-software-api-client\" / \"bin\"", "def getBinary():\n binary = shutil.which(_ROCKETLOGGER_BINARY)\n\n if not os.path.exists(binary):\n raise FileNotFoundError(f\"Could not find RocketLogger CLI binary! [{binary}]\")\n return os.path.abspath(binary)", "def bin_root(self):\n return os.path.join(self.build_dir, self.build, \"stage0\")", "def module_path():\n return os.path.dirname(unicode(__file__, sys.getfilesystemencoding( )))", "def get_target_binary():\n file_location = prompt_base(\"where is the file located?\")\n file_location = os.path.abspath(file_location)\n return file_location", "def path_to_program_dir(self):\n\tpath = sys.argv[0]\n\n\tif not os.path.isdir(path):\n\t path = os.path.dirname(path)\n\n\tif not path: return '.'\n\n\treturn path", "def binary_location(cmd, USE_PATH=False):\n if USE_PATH:\n return cmd\n else:\n return os.path.join(BIN_PREFIX, cmd)", "def get_executable(self) -> str:\n ...", "def get_directory(self):\n mypath = mlblocks.get_primitives_paths()[-1]\n return mypath", "def get_test_binary():\n return \"./Source/astcenc\"", "def get_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))", "def GetLauncherPath(self):\n return os.path.dirname(__file__)", "def GetLauncherPath(self):\n return os.path.dirname(__file__)", "def _getCodeFolder(self):\n if getattr(sys, 'frozen', False):\n # we are running in a bundle (frozen)\n bundle_dir = sys._MEIPASS\n else:\n # we are running in a normal Python environment\n bundle_dir = os.path.dirname(os.path.abspath(__file__))\n return bundle_dir", "def get_installdir(self):\n import mewlo\n path = os.path.dirname(os.path.realpath(mewlo.__file__))\n return path", "def module_path():\r\n if hasattr(sys, \"frozen\"):\r\n return os.path.dirname(sys.executable)\r\n return os.path.dirname(__file__)", "def bootstrap_binary(self):\n return os.path.join(self.build_dir, \"bootstrap\", \"debug\", \"bootstrap\")", "def get_output_path():\n return os.getcwd() + \"/output/\"", "def get_mtad_linter_path():\n return os.path.expandvars(os.path.join(\n \"%PROGRAMFILES(X86)%\", \"MEDITECH\", \"M-AT Tools\", \"M-AT_Code_Checker\"))", "def get_vernissagecmd_path():\n return vernissagecmd_path", "def _get_R_script_path(self):\r\n return join(self._get_R_script_dir(), self._R_script)", "def _get_path(): # THIS IS JUST FOR GETTING THE FILE\n return os.path.dirname(os.path.abspath(__file__)) + '/'", "def getFmeExePath(self):\n \n tmp = self.__fmepath.rstrip(\"\\\\/\")\n return tmp" ]
[ "0.682036", "0.6652295", "0.65895194", "0.6584268", "0.6460019", "0.64083457", "0.63974833", "0.6339136", "0.6329058", "0.63127565", "0.6281566", "0.6236012", "0.6232723", "0.62326777", "0.6230347", "0.61885166", "0.6138241", "0.61246544", "0.6089618", "0.6089618", "0.60891604", "0.6067412", "0.60656154", "0.6032475", "0.6023596", "0.6005696", "0.59991974", "0.59912395", "0.5980514", "0.59742695" ]
0.7753057
0