query (stringlengths 9-9.05k) | document (stringlengths 10-222k) | metadata (dict) | negatives (listlengths 30) | negative_scores (listlengths 30) | document_score (stringlengths 4-10) | document_rank (stringclasses 2 values) |
---|---|---|---|---|---|---|
Sequence matrix_distance should look up distances from a matrix | def test_matrix_distance(self):
# note that the score matrix must contain 'diagonal' elements m[i][i]
# to avoid failure when the sequences match.
m = {"U": {"U": 0, "C": 1, "A": 5}, "C": {"C": 0, "A": 2, "G": 4}}
self.assertEqual(self.RNA("UUUCCC").matrix_distance("UCACGG", m), 14)
self.assertEqual(self.RNA("UUUCCC").matrix_distance("", m), 0)
self.assertEqual(self.RNA("UUU").matrix_distance("CAC", m), 7)
self.assertRaises(KeyError, self.RNA("UUU").matrix_distance, "CAG", m) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __build_distance_matrix(self):\n for i in range(0, len(self.__corpus)):\n doc_i = self.__corpus[i]\n for j in range(i + 1, len(self.__corpus)):\n doc_j = self.__corpus[j]\n distance = doc_i.calc_distance(doc_j)\n self.__distance_matrix.append(distance)",
"def _distance_matrix(self):\n\n # Log the type of metric being used in Sequencing\n logger.info('Using {} Distance'.format(self.measure))\n\n # Convert the nodal coordinate tuples to a np.array\n coords = np.vstack(map(np.array, self.coords.values()))\n \n if self.measure == 'haversine':\n # Partially applied haversine function that takes a coord and computes the vector distances for all coords\n haversine = lambda coord: get_hav_distance(coords[:, 0], coords[:, 1], *coord) \n # Map the partially applied function over all coordinates, and stack to a matrix\n return np.vstack(map(haversine, coords))\n\n # Partially applied haversine function that takes a coord and computes the vector distances for all coords\n euclidean = lambda coord: get_euclidean_dist(coords, coord)\n # Map the partially applied function over all coordinates, and stack to a matrix\n return np.vstack(map(euclidean, coords))",
"def distance_matrix(data):\n D = numpy.zeros( (data.shape[0], data.shape[0]) )\n for i in xrange(data.shape[0]):\n for j in xrange(i):\n D[i,j] = numpy.linalg.norm(data[i,:]-data[j,:])\n D[j,i] = D[i,j]\n\n return D",
"def _derive_euclidean_dm(self, cat_mat, dim):\r\n res_mat = []\r\n\r\n for i in range(dim):\r\n res_mat.append([0 for k in range(dim)])\r\n for j in range(i):\r\n res_mat[i][j] = self._vector_dist(cat_mat[i], cat_mat[j])\r\n res_mat[j][i] = res_mat[i][j]\r\n\r\n return DistanceMatrix(res_mat, self.DistanceMatrices[0].ids)",
"def _get_node_distance_matrix(\n self, datapoint: np.ndarray, som_array: np.ndarray\n ) -> np.ndarray:\n # algorithms on the full matrix\n if self.distance_metric == \"euclidean\":\n return np.linalg.norm(som_array - datapoint, axis=2)\n\n # node-by-node algorithms\n distmat = np.zeros((self.n_rows, self.n_columns))\n if self.distance_metric == \"manhattan\":\n for node in self.node_list_:\n distmat[node] = dist.cityblock(\n som_array[node[0], node[1]], datapoint\n )\n\n elif self.distance_metric == \"mahalanobis\":\n for node in self.node_list_:\n som_node = som_array[node[0], node[1]]\n cov = np.cov(\n np.stack((datapoint, som_node), axis=0), rowvar=False\n )\n cov_pinv = np.linalg.pinv(cov) # pseudo-inverse\n distmat[node] = dist.mahalanobis(datapoint, som_node, cov_pinv)\n\n elif self.distance_metric == \"tanimoto\":\n # Note that this is a binary distance measure.\n # Therefore, the vectors have to be converted.\n # Source: Melssen 2006, Supervised Kohonen networks for\n # classification problems\n # VERY SLOW ALGORITHM!!!\n threshold = 0.5\n for node in self.node_list_:\n som_node = som_array[node[0], node[1]]\n distmat[node] = dist.rogerstanimoto(\n binarize(\n datapoint.reshape(1, -1),\n threshold=threshold,\n copy=True,\n ).ravel(),\n binarize(\n som_node.reshape(1, -1), threshold=threshold, copy=True\n ).ravel(),\n )\n\n elif self.distance_metric == \"spectralangle\":\n for node in self.node_list_:\n distmat[node] = np.arccos(\n np.divide(\n np.dot(som_array[node[0], node[1]], datapoint),\n np.multiply(\n # TODO check if an axis needs to be set here\n np.linalg.norm(som_array),\n np.linalg.norm(datapoint),\n ),\n )\n )\n\n return distmat",
"def get_distance_matrix(self, points):\n return points[:, :, np.newaxis, :]-points[:, np.newaxis, :, :]",
"def create_dist_matrix(matrix):\n #Convert input data matrix to numpy matrix\n matrix = np.array(matrix)\n n = matrix.shape[0]\n \n #Iterate through number of samples to create distance matrix\n for i in range(n):\n dist_array = euclidean_distance(matrix[i,:], matrix)\n if i == 0:\n dist_matrix = dist_array\n else:\n dist_matrix = np.concatenate((dist_matrix, dist_array), axis = 1)\n return dist_matrix",
"def distance_matrix(sequences, substitution_mat):\n distance_mat = numpy.empty((len(sequences), len(sequences)), dtype='float')\n\n print(\"Building distance matrix\")\n # Get similarity score\n for i, seqA in enumerate(sequences):\n sys.stdout.write(\"\\r%.f%%\" % (float(i+1)/len(sequences)*100))\n sys.stdout.flush()\n for j, seqB in enumerate(sequences[i:], start=i):\n score = substitution_score(substitution_mat, seqA, seqB)\n distance_mat[i, j] = score\n distance_mat[j, i] = score\n print(\"\")\n # Set equal the diagonal\n diag_mini = numpy.min(distance_mat.diagonal())\n for i in range(len(sequences)):\n distance_mat[i, i] = diag_mini\n # Convert similarity score into a distance\n mini = numpy.min(distance_mat)\n maxi = numpy.max(distance_mat)\n return 1 - (distance_mat + abs(mini))/(maxi - mini)",
"def distance_matrix(n_row, n_col):\n\n n_pop = int(n_row * n_col)\n center = int(n_row/2*(n_col+1))\n\n pop_idx = np.arange(n_pop)\n pop_idx_col = np.remainder(pop_idx, n_col)\n pop_idx_row = pop_idx // n_row\n\n pos = np.vstack((pop_idx_col,pop_idx_row)).T\n distance = spa.distance.cdist([pos[center]], pos)[0]\n\n return distance",
"def distance_matrix(X, Y, metric):\n distance = np.zeros((len(X), len(Y)))\n for i in range(len(X)):\n for j in range(len(Y)):\n m = metric(X[i], Y[j])\n if np.isnan(m):\n pdb.set_trace()\n distance[i, j] = m\n return distance",
"def compute_l2_distance_matrix(features_queries, features_dataset):\n sx = np.sum(features_queries ** 2, axis=1, keepdims=True)\n sy = np.sum(features_dataset ** 2, axis=1, keepdims=True)\n\n return np.sqrt(-2 * features_queries.dot(features_dataset.T) + sx + sy.T)",
"def distances(self):",
"def compute_dist_matrix(X1, X2, distance):\n N, M = X1.shape[0], X2.shape[0]\n dist_matrix = np.zeros((N, M))\n for i in range(N):\n for j in range(M):\n dist_matrix[i][j] = dist(X1[i], X2[j], distance=distance)\n return dist_matrix",
"def get_distances(self):\n N = len(self.cells) # Number of cells\n distances = np.zeros([N, N]) # distances between cells\n positions = self.position_matrix() # positions of cells \n \n # get distances between cells (exploit symmetry between upper and lower triangular form)\n for i, position in enumerate(positions[:-1, :]): # Iterate matrix except the last one\n directions = positions[i+1:, :] - position # direction from i to j > i\n distances[i, i+1:] = np.linalg.norm(directions, axis=1) # length of directions\n \n return distances + distances.T # Add lower triangle of matrix to upper ",
"def compute_distance_matrix_from_metadata(column_data):\r\n data_row = array(column_data)\r\n data_col = reshape(data_row, (1, len(data_row)))\r\n dist_mtx = abs(data_row - data_col.T)\r\n\r\n return dist_mtx",
"def measure_distance(self, mat):\n if len(mat) == 1:\n print(\"chain has only one CAatom\")\n return\n self.dists = []\n for num in range(0, len(mat)):\n if num + 1 <= len(mat) - 1:\n c1 = mat[num]\n c2 = mat[num + 1]\n d = c2 - c1\n self.dists.append(math.sqrt(np.sum(d * d)))\n return self.dists",
"def im_dist_mat(self):\n mat = np.zeros([self.I, self.M])\n for i in range(self.I):\n for m in range(self.M):\n mat[i, m] = distance(self.I_coords[i], self.M_coords[m])\n return mat",
"def compute_distance(self, transpose=False):\n\n # Calculate distance matrix\n if transpose:\n distance_matrix = pdist(self.matrix.T, self.distance)\n else:\n distance_matrix = pdist(self.matrix, self.distance)\n\n # Remove NaNs\n distance_matrix[np.isnan(distance_matrix)] = 1.0\n\n return distance_matrix",
"def _computeDistances(self) -> None:\n length = len(self.data)\n for i, sequenceOne in enumerate(self.data):\n print(f\"[SeqCluBaselineOffline] Computing distances is at iteration {i} of {length}.\")\n for j, sequenceTwo in enumerate(self.data):\n if i == j:\n self.distances[i][j] = 0\n continue\n distance = self.distanceMeasure.calculateDistance(sequenceOne, sequenceTwo)\n self.distances[i][j] = distance\n self.distances[j][i] = distance",
"def _generate_distance_kernel_matrix(self):\n with self._rw_lock.read_lock():\n # Create matrix whose elements are the distances between all row\n # permutations\n fmat = self._feature_mat # shorter name\n num_rows = fmat.shape[0]\n\n # distance kernel is a square matrix based on feature samples\n dist_kernel = np.mat(np.ndarray((num_rows,)*2))\n self._log.info(\"Creating distance kernel with shape %s\",\n dist_kernel.shape)\n\n timer_log = logging.getLogger('.'.join((self.__module__,\n self.__class__.__name__,\n \"SimpleTimer\")))\n\n for i in xrange(num_rows - 1):\n with SimpleTimer('computing distances from row %d to [%d-%d]'\n % (i, i+1, num_rows-1), timer_log):\n dist_kernel[i, i] = 1.0\n for j in xrange(i + 1, num_rows):\n dist = self._histogram_intersection_distance(fmat[i],\n fmat[j])\n dist_kernel[i, j] = dist_kernel[j, i] = dist\n dist_kernel[-1, -1] = 1.0\n return dist_kernel",
"def nm_dist_mat(self):\n mat = np.zeros([self.N, self.M])\n for n in range(self.N):\n for m in range(self.M):\n mat[n, m] = distance(self.N_coords[n], self.M_coords[m])\n return mat",
"def distance_matrix_vector(anchor, positive):\n \n # here anchor*anchor is equal torch.mul(anchor, anchor)\n # the element-wise value multiplication is returned\n d1_sq = torch.sum(anchor * anchor, dim=1).unsqueeze(-1)\n d2_sq = torch.sum(positive * positive, dim=1).unsqueeze(-1)\n\n eps = 1e-6\n # tensor.repeat(): repeat at each dims, and dims from right to left\n return torch.sqrt((d1_sq.repeat(1, anchor.size(0)) + torch.t(d2_sq.repeat(1, positive.size(0)))\n - 2.0 * torch.bmm(anchor.unsqueeze(0), torch.t(positive).unsqueeze(0)).squeeze(0))\n +eps)",
"def getDistanceMatrix(self):\n v = self.getVectors()\n vLis = v.keys()\n N = len(v.keys())\n D = np.zeros([N, N], dtype=np.float32)\n print(N)\n for i in range(N):\n print(\"%d/%d\" %(i, N))\n D[i, i] = 1\n for j in range(i + 1, N):\n dist = self.cosin_sim_pairs(v[vLis[i]], v[vLis[j]])\n D[i, j] = dist\n D[j, i] = dist\n return D",
"def distance_matrix(d1, d2=None):\n if d2 is None:\n dists = np.zeros(shape=(d1.shape[0], d1.shape[0]))\n for i in range(dists.shape[0]):\n dists[i] = (((d1 - d1[i]) ** 2).sum(axis=1)) ** 0.5\n else:\n dists = np.zeros(shape=(d1.shape[0], d2.shape[0]))\n for i in range(d1.shape[0]):\n dists[i] = (((d2 - d1[i]) ** 2).sum(axis=1)) ** 0.5\n return dists",
"def calc_dist_matrix(self,verbose=False):\n\n print(\"Calculating distance matrix.\"); sys.stdout.flush()\n\n nrow = self.data_vector.shape[0]\n self.dist_matrix = np.zeros((nrow, nrow),dtype=float)\n for i in range(nrow):\n if verbose:\n if i % 1000 == 0:\n print(\"Row\",i,\"of\",nrow)\n sys.stdout.flush()\n\n for j in range(i + 1, nrow):\n self.dist_matrix[i,j] = self._pairwise_dist(self.data_vector[i],self.data_vector[j])\n self.dist_matrix[j,i] = self.dist_matrix[i,j]\n \n self.dist_frame = pd.DataFrame(self.dist_matrix,\n index = self.seq_strings,\n columns = self.seq_strings)",
"def get_distance_matrix(df, distance_measure, feat_col_ix=1):\n n = len(df)\n dist_matrix = np.zeros((n,n))\n for i in range(n):\n for j in range(j):\n si = df.iloc[i, feat_col_ix:]\n sj = df.iloc[j, feat_col_ix:]\n dist_matrix[i,j] = distance_measure(si, sj)[0]\n return dist_matrix",
"def compute_euclidean_distance_matrix(locations):\n distances = {}\n distances_df=get_times(locations)\n print(distances_df)\n print(distances_df.iloc[0,0])\n print(distances_df.iloc[0,1])\n print(distances_df.iloc[0,2])\n for from_counter, from_node in enumerate(locations):\n distances[from_counter] = {}\n for to_counter, to_node in enumerate(locations):\n distances[from_counter][to_counter] = (int(\n distances_df.iloc[from_counter,to_counter]))\n return distances",
"def dist_matrix(self):\n return self.__dist_matrix",
"def closest_dna_dist(matrixfile):\n\n global verbose\n if verbose:\n sys.stderr.write(\"Getting closest distances\\n\")\n distances = {}\n\n if matrixfile.endswith('.gz'):\n with gzip.open(matrixfile, 'rt') as f:\n l = f.readline()\n ids = l.rstrip().split(\"\\t\")\n for i,name in enumerate(ids):\n if i == 0:\n continue\n distances[name] = {}\n for l in f:\n data = l.rstrip().split(\"\\t\")\n for i,dist in enumerate(data):\n if i == 0:\n continue\n distances[data[0]][ids[i]] = float(dist)\n distances[ids[i]][data[0]] = float(dist)\n else:\n with open(matrixfile, 'r') as f:\n l = f.readline()\n ids = l.rstrip().split(\"\\t\")\n for i,name in enumerate(ids):\n if i == 0:\n continue\n distances[name] = {}\n for l in f:\n data = l.rstrip().split(\"\\t\")\n for i,dist in enumerate(data):\n if i == 0:\n continue\n distances[data[0]][ids[i]] = float(dist)\n distances[ids[i]][data[0]] = float(dist)\n\n\n closest = {}\n for d in distances:\n closest[d] = {}\n for k in sorted(distances[d], key=distances[d].get):\n if k == d:\n continue\n closest[d][k] = distances[d][k]\n break\n if verbose:\n sys.stderr.write(\"From\\tTo\\tDistance\\n\")\n for d in distances:\n for k in closest[d]:\n sys.stderr.write(\"{}\\t{}\\t{}\\n\".format(d, k, closest[d][k]))\n\n\n if verbose:\n sys.stderr.write(\"\\n\\n\\nDone\\n\")\n return closest",
"def getDistanceMatrix(self):\n return self.distmat.as_matrix()"
] | [
"0.6737727",
"0.6700531",
"0.65607816",
"0.6550453",
"0.6502882",
"0.6492697",
"0.6485367",
"0.6467",
"0.64162374",
"0.6375636",
"0.6326314",
"0.63200414",
"0.63023067",
"0.6289097",
"0.6281347",
"0.62686527",
"0.6267945",
"0.62410456",
"0.6240649",
"0.62321705",
"0.62179285",
"0.62137336",
"0.62014437",
"0.6199634",
"0.6107576",
"0.6089918",
"0.60740066",
"0.6057635",
"0.6047035",
"0.6042571"
] | 0.7115163 | 0 |
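
The `matrix_distance` behaviour exercised in the record above is simple to restate: scores for aligned positions are looked up in a nested dict and summed, the comparison truncates to the shorter sequence, and a pair missing from the matrix raises `KeyError`. A minimal standalone sketch of that contract (the free-function form and parameter names are assumptions; in the dataset the method lives on the sequence class):

```python
def matrix_distance(seq1, seq2, matrix):
    """Sum matrix[a][b] over aligned positions of the two sequences."""
    # zip() truncates to the shorter sequence, so an empty comparison gives 0;
    # a pair absent from the nested dict raises KeyError, as the test expects.
    return sum(matrix[a][b] for a, b in zip(seq1, seq2))


m = {"U": {"U": 0, "C": 1, "A": 5}, "C": {"C": 0, "A": 2, "G": 4}}
assert matrix_distance("UUUCCC", "UCACGG", m) == 14
```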
Sequence frac_diff should return difference between sequences | def test_frac_diff(self):
s1 = self.RNA("ACGU")
s2 = self.RNA("AACG")
s3 = self.RNA("GG")
s4 = self.RNA("A")
e = self.RNA("")
self.assertEqual(s1.frac_diff(e), 0)
self.assertEqual(s1.frac_diff(s2), 0.75)
self.assertEqual(s1.frac_diff(s3), 1)
self.assertEqual(s1.frac_diff(s4), 0) # note truncation | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_frac_diffNonGaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"AGGG\")\n s3 = self.RNA(\"GGGG\")\n s4 = self.RNA(\"AG--GA-G\")\n s5 = self.RNA(\"CU--CU-C\")\n s6 = self.RNA(\"AC--GC-G\")\n s7 = self.RNA(\"--------\")\n s8 = self.RNA(\"AAAA----\")\n s9 = self.RNA(\"A-GG-A-C\")\n e = self.RNA(\"\")\n\n def test(x, y, z):\n return self.assertFloatEqual(x.frac_diff_non_gaps(y), z)\n\n test(s1, s2, 0.75)\n test(s1, s3, 1)\n test(s2, s3, 0.25)\n test(s1, s4, 0.5)\n test(s4, s5, 1)\n test(s4, s6, 0.4)\n test(s4, s7, 0)\n test(s4, s8, 0.5)\n test(s4, s9, 1 / 3.0)\n test(e, s4, 0)",
"def test_frac_diffGaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"GGGG\")\n s3 = self.RNA(\"----\")\n s4 = self.RNA(\"A-A-\")\n s5 = self.RNA(\"-G-G\")\n s6 = self.RNA(\"UU--\")\n s7 = self.RNA(\"-\")\n s8 = self.RNA(\"GGG\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_diff_gaps(s1), 0)\n self.assertEqual(s1.frac_diff_gaps(s2), 0)\n self.assertEqual(s1.frac_diff_gaps(s3), 1)\n self.assertEqual(s1.frac_diff_gaps(s4), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s5), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s6), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s7), 1)\n self.assertEqual(s1.frac_diff_gaps(e), 0)\n self.assertEqual(s3.frac_diff_gaps(s3), 0)\n self.assertEqual(s3.frac_diff_gaps(s4), 0.5)\n self.assertEqual(s3.frac_diff_gaps(s7), 0.0)\n self.assertEqual(e.frac_diff_gaps(e), 0.0)\n self.assertEqual(s4.frac_diff_gaps(s5), 1.0)\n self.assertEqual(s4.frac_diff_gaps(s6), 0.5)\n self.assertFloatEqual(s6.frac_diff_gaps(s8), 1 / 3.0)",
"def seq_numbers_diff(start_seq: int, end_seq: int) -> int:\n if start_seq < 0 or end_seq < 0:\n return None\n if start_seq > end_seq:\n return end_seq + (SEQ_NUM_MOD_CONST - start_seq)\n else:\n return end_seq - start_seq",
"def diff_frac(data_1, data_2):\n\n frac_1 = np.sum(data_1) / len(data_1)\n frac_2 = np.sum(data_2) / len(data_2)\n\n return frac_1 - frac_2",
"def difference(seq, *seqs):\n yield from differenceby(None, seq, *seqs)",
"def fraction(amount, start, stop, truncated, sequence):\n ratio = stop\n for x in range(start, amount):\n y = abs(round(ratio / (abs(x) + 1)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence",
"def reverse_difference():",
"def diff(x):\n return x[1:] - x[:-1]",
"def get_periods(a,t):\n ex = get_extrema(a,t)[1]\n \n l = ipol(ex,0)\n \n diff = np.diff(l)\n \n return diff",
"def delta_coeffs(self, mfcc_seq):\n\n length = mfcc_seq.shape[1]\n deltas = np.empty(mfcc_seq.shape)\n deltas2 = np.empty(mfcc_seq.shape)\n\n for (coeff, samp), value in np.ndenumerate(deltas):\n if samp == 0 or samp == length-1:\n deltas[coeff, samp] = 0\n deltas2[coeff, samp] = 0\n else:\n deltas[coeff, samp] = mfcc_seq[coeff, samp+1] \\\n - mfcc_seq[coeff, samp-1]\n deltas2[coeff, samp] = 0.5*mfcc_seq[coeff, samp+1] \\\n - 2*mfcc_seq[coeff, samp] \\\n + 0.5*mfcc_seq[coeff, samp-1]\n print(\"AudioClip--Deltas Retrieved\")\n return (deltas, deltas2)",
"def get_all_diffs(sequence, d):\n\n\tdiffs = set([sequence])\n\tindex_combos = get_combinations(range(len(sequence)), d)\n\tfor combo in index_combos:\n\t\tcurrent_combos = set([sequence])\n\t\tfor index in combo:\n\t\t\tfor t in set(current_combos):\n\t\t\t\tfor nuc in alpha:\n\t\t\t\t\tcurrent_combos.add(t[0:index] + nuc + t[index+1:])\n\t\tdiffs = diffs.union(current_combos)\n\treturn diffs",
"def test_frac_same_non_gaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"AGGG\")\n s3 = self.RNA(\"GGGG\")\n s4 = self.RNA(\"AG--GA-G\")\n s5 = self.RNA(\"CU--CU-C\")\n s6 = self.RNA(\"AC--GC-G\")\n s7 = self.RNA(\"--------\")\n s8 = self.RNA(\"AAAA----\")\n s9 = self.RNA(\"A-GG-A-C\")\n e = self.RNA(\"\")\n\n def test(x, y, z):\n return self.assertFloatEqual(x.frac_same_non_gaps(y), z)\n\n test(s1, s2, 0.25)\n test(s1, s3, 0)\n test(s2, s3, 0.75)\n test(s1, s4, 0.5)\n test(s4, s5, 0)\n test(s4, s6, 0.6)\n test(s4, s7, 0)\n test(s4, s8, 0.5)\n test(s4, s9, 2 / 3.0)\n test(e, s4, 0)",
"def test_frac_same_gaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"GGGG\")\n s3 = self.RNA(\"----\")\n s4 = self.RNA(\"A-A-\")\n s5 = self.RNA(\"-G-G\")\n s6 = self.RNA(\"UU--\")\n s7 = self.RNA(\"-\")\n s8 = self.RNA(\"GGG\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_same_gaps(s1), 1)\n self.assertEqual(s1.frac_same_gaps(s2), 1)\n self.assertEqual(s1.frac_same_gaps(s3), 0)\n self.assertEqual(s1.frac_same_gaps(s4), 0.5)\n self.assertEqual(s1.frac_same_gaps(s5), 0.5)\n self.assertEqual(s1.frac_same_gaps(s6), 0.5)\n self.assertEqual(s1.frac_same_gaps(s7), 0)\n self.assertEqual(s1.frac_same_gaps(e), 0)\n self.assertEqual(s3.frac_same_gaps(s3), 1)\n self.assertEqual(s3.frac_same_gaps(s4), 0.5)\n self.assertEqual(s3.frac_same_gaps(s7), 1.0)\n self.assertEqual(e.frac_same_gaps(e), 0.0)\n self.assertEqual(s4.frac_same_gaps(s5), 0.0)\n self.assertEqual(s4.frac_same_gaps(s6), 0.5)\n self.assertFloatEqual(s6.frac_same_gaps(s8), 2 / 3.0)",
"def diff(self):\n\t\tif len(self.v) < 4:\n\t\t\treturn None\n\t\tif self.poli == None:\n\t\t\tself.generar_polinomio()\n\t\tif x != None:\n\t\t\treturn diff(self.poli)(x)\n\t\treturn diff(self.poli)",
"def array_diff(a, b):",
"def differences(input_list):\n output_list = []\n for x in range(1,len(input_list)):\n output_list.append(input_list[x]-input_list[x-1])\n return output_list",
"def set_diff(seq0, seq1):\n return list(set(seq0) - set(seq1))",
"def deltas(L):\n return map(sub, tuple(L)[1:], L)",
"def transition_transversion_ratio(sequences):\n transitions, transversions = 0, 0\n\n for a1, a2 in zip(*sequences):\n if a1 != a2:\n # Transition\n if a1 in \"AG\" and a2 in \"AG\" or a1 in \"CT\" and a2 in \"CT\":\n transitions += 1\n # Transversion\n else:\n transversions += 1\n\n return transitions / transversions",
"def test_frac_same(self):\n s1 = self.RNA(\"ACGU\")\n s2 = self.RNA(\"AACG\")\n s3 = self.RNA(\"GG\")\n s4 = self.RNA(\"A\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_same(e), 0)\n self.assertEqual(s1.frac_same(s2), 0.25)\n self.assertEqual(s1.frac_same(s3), 0)\n self.assertEqual(s1.frac_same(s4), 1.0) # note truncation",
"def seqDiffs(my_seq):\n return ((m.start(0), m.group()) for m in re.finditer('[A-Z]|_+', my_seq))",
"def test_frac_similar(self):\n transitions = dict.fromkeys(\n [\n (\"A\", \"A\"),\n (\"A\", \"G\"),\n (\"G\", \"A\"),\n (\"G\", \"G\"),\n (\"U\", \"U\"),\n (\"U\", \"C\"),\n (\"C\", \"U\"),\n (\"C\", \"C\"),\n ]\n )\n\n s1 = self.RNA(\"UCAGGCAA\")\n s2 = self.RNA(\"CCAAAUGC\")\n s3 = self.RNA(\"GGGGGGGG\")\n e = self.RNA(\"\")\n\n def test(x, y, z):\n return self.assertFloatEqual(x.frac_similar(y, transitions), z)\n\n test(e, e, 0)\n test(s1, e, 0)\n test(s1, s1, 1)\n test(s1, s2, 7.0 / 8)\n test(s1, s3, 5.0 / 8)\n test(s2, s3, 4.0 / 8)",
"def rat2frac_list(x, y):\n\tcont = rat2cont_quot(x, y)\n\tfrac = []\n\tfor i in range(len(cont)):\n\t\tfrac.append(cont2frac(cont[:(i+1)]))\n\treturn frac",
"def get_diff(text_1, text_2):\n\n return str(round(SequenceMatcher(None, text_1, text_2).ratio()*100, 2)) + '%'",
"def sub4(a,b):\n return [a[0]-b[0],a[1]-b[1],a[2]-b[2],a[3]-b[3]]",
"def _kendall_tau_diff(self, a, b, i):\n # compute ordering relation of the single points a[i] and b[i]\n # with all previous points of the sequences a and b, respectively\n a_pos = np.zeros(a[:i].size, dtype=int)\n a_pos[a[:i] > a[i]] = 1\n a_pos[a[:i] < a[i]] = -1\n b_pos = np.zeros(b[:i].size, dtype=int)\n b_pos[b[:i] > b[i]] = 1\n b_pos[b[:i] < b[i]] = -1\n diff_pos = np.dot(a_pos, b_pos).astype(float)\n\n # compute ordering relation of the single points a[i] and b[i]\n # with all later points of the sequences\n a_neg = np.zeros(a[i:].size, dtype=int)\n a_neg[a[i:] > a[i]] = 1\n a_neg[a[i:] < a[i]] = -1\n b_neg = np.zeros(b[i:].size, dtype=int)\n b_neg[b[i:] > b[i]] = 1\n b_neg[b[i:] < b[i]] = -1\n diff_neg = np.dot(a_neg, b_neg)\n\n return diff_pos, diff_neg",
"def frac_reverts(self):\n\n text = self.text()\n num_reverts = len(re.findall(\"Reverted\",text))\n return num_reverts/500",
"def sub(a,b):\n return [a[0]-b[0],a[1]-b[1],a[2]-b[2],1.0]",
"def _compute_diff(self, begin, end):\n d = self.diff\n x = self.x\n for i in range(begin, end):\n for j in range(i):\n d[i].append((d[i][j] - d[i-1][j]) / (x[i] - x[i-j-1]))",
"def temp_difference_cal(time_list):\n if len(time_list) > 1:\n final = float(time_list[len(time_list) - 1])\n initial = float(time_list[len(time_list) - 2])\n difference = final - initial\n else:\n difference = -1.0\n return difference"
] | [
"0.6682282",
"0.65871775",
"0.6406857",
"0.63614774",
"0.633233",
"0.60608715",
"0.5996062",
"0.5925066",
"0.5844651",
"0.58167034",
"0.57268333",
"0.56967485",
"0.56595033",
"0.56105644",
"0.5590587",
"0.5588716",
"0.55883276",
"0.55557704",
"0.554874",
"0.5516476",
"0.5486781",
"0.5485635",
"0.54403263",
"0.5429278",
"0.5402806",
"0.5395943",
"0.53931785",
"0.5352675",
"0.53481305",
"0.5330733"
] | 0.69646966 | 0 |
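
Restated, the `frac_diff` test above reports the fraction of mismatched positions after truncating to the shorter sequence, with an empty comparison defined as 0. A hedged standalone sketch of that behaviour (the function form and names are assumptions, not the library's API):

```python
def frac_diff(seq1, seq2):
    """Fraction of aligned positions that differ, over the truncated length."""
    pairs = list(zip(seq1, seq2))  # truncates to the shorter sequence
    if not pairs:
        return 0  # empty comparison is defined as 0 in the test above
    return sum(a != b for a, b in pairs) / len(pairs)


assert frac_diff("ACGU", "AACG") == 0.75
```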
Sequence frac_same_gaps should return similarity in gap positions | def test_frac_same_gaps(self):
s1 = self.RNA("AAAA")
s2 = self.RNA("GGGG")
s3 = self.RNA("----")
s4 = self.RNA("A-A-")
s5 = self.RNA("-G-G")
s6 = self.RNA("UU--")
s7 = self.RNA("-")
s8 = self.RNA("GGG")
e = self.RNA("")
self.assertEqual(s1.frac_same_gaps(s1), 1)
self.assertEqual(s1.frac_same_gaps(s2), 1)
self.assertEqual(s1.frac_same_gaps(s3), 0)
self.assertEqual(s1.frac_same_gaps(s4), 0.5)
self.assertEqual(s1.frac_same_gaps(s5), 0.5)
self.assertEqual(s1.frac_same_gaps(s6), 0.5)
self.assertEqual(s1.frac_same_gaps(s7), 0)
self.assertEqual(s1.frac_same_gaps(e), 0)
self.assertEqual(s3.frac_same_gaps(s3), 1)
self.assertEqual(s3.frac_same_gaps(s4), 0.5)
self.assertEqual(s3.frac_same_gaps(s7), 1.0)
self.assertEqual(e.frac_same_gaps(e), 0.0)
self.assertEqual(s4.frac_same_gaps(s5), 0.0)
self.assertEqual(s4.frac_same_gaps(s6), 0.5)
self.assertFloatEqual(s6.frac_same_gaps(s8), 2 / 3.0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_frac_same_non_gaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"AGGG\")\n s3 = self.RNA(\"GGGG\")\n s4 = self.RNA(\"AG--GA-G\")\n s5 = self.RNA(\"CU--CU-C\")\n s6 = self.RNA(\"AC--GC-G\")\n s7 = self.RNA(\"--------\")\n s8 = self.RNA(\"AAAA----\")\n s9 = self.RNA(\"A-GG-A-C\")\n e = self.RNA(\"\")\n\n def test(x, y, z):\n return self.assertFloatEqual(x.frac_same_non_gaps(y), z)\n\n test(s1, s2, 0.25)\n test(s1, s3, 0)\n test(s2, s3, 0.75)\n test(s1, s4, 0.5)\n test(s4, s5, 0)\n test(s4, s6, 0.6)\n test(s4, s7, 0)\n test(s4, s8, 0.5)\n test(s4, s9, 2 / 3.0)\n test(e, s4, 0)",
"def test_frac_diffGaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"GGGG\")\n s3 = self.RNA(\"----\")\n s4 = self.RNA(\"A-A-\")\n s5 = self.RNA(\"-G-G\")\n s6 = self.RNA(\"UU--\")\n s7 = self.RNA(\"-\")\n s8 = self.RNA(\"GGG\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_diff_gaps(s1), 0)\n self.assertEqual(s1.frac_diff_gaps(s2), 0)\n self.assertEqual(s1.frac_diff_gaps(s3), 1)\n self.assertEqual(s1.frac_diff_gaps(s4), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s5), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s6), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s7), 1)\n self.assertEqual(s1.frac_diff_gaps(e), 0)\n self.assertEqual(s3.frac_diff_gaps(s3), 0)\n self.assertEqual(s3.frac_diff_gaps(s4), 0.5)\n self.assertEqual(s3.frac_diff_gaps(s7), 0.0)\n self.assertEqual(e.frac_diff_gaps(e), 0.0)\n self.assertEqual(s4.frac_diff_gaps(s5), 1.0)\n self.assertEqual(s4.frac_diff_gaps(s6), 0.5)\n self.assertFloatEqual(s6.frac_diff_gaps(s8), 1 / 3.0)",
"def test_frac_diffNonGaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"AGGG\")\n s3 = self.RNA(\"GGGG\")\n s4 = self.RNA(\"AG--GA-G\")\n s5 = self.RNA(\"CU--CU-C\")\n s6 = self.RNA(\"AC--GC-G\")\n s7 = self.RNA(\"--------\")\n s8 = self.RNA(\"AAAA----\")\n s9 = self.RNA(\"A-GG-A-C\")\n e = self.RNA(\"\")\n\n def test(x, y, z):\n return self.assertFloatEqual(x.frac_diff_non_gaps(y), z)\n\n test(s1, s2, 0.75)\n test(s1, s3, 1)\n test(s2, s3, 0.25)\n test(s1, s4, 0.5)\n test(s4, s5, 1)\n test(s4, s6, 0.4)\n test(s4, s7, 0)\n test(s4, s8, 0.5)\n test(s4, s9, 1 / 3.0)\n test(e, s4, 0)",
"def check_gaps(matches, gap_threshold = 0):\n gaps = []\n prev = None\n for match in sorted(matches, key = itemgetter(0)):\n if prev is None:\n prev = match\n continue\n if match[0] - prev[1] >= gap_threshold:\n gaps.append([prev, match])\n prev = match\n return [[i[0][1], i[1][0]] for i in gaps]",
"def test_frac_similar(self):\n transitions = dict.fromkeys(\n [\n (\"A\", \"A\"),\n (\"A\", \"G\"),\n (\"G\", \"A\"),\n (\"G\", \"G\"),\n (\"U\", \"U\"),\n (\"U\", \"C\"),\n (\"C\", \"U\"),\n (\"C\", \"C\"),\n ]\n )\n\n s1 = self.RNA(\"UCAGGCAA\")\n s2 = self.RNA(\"CCAAAUGC\")\n s3 = self.RNA(\"GGGGGGGG\")\n e = self.RNA(\"\")\n\n def test(x, y, z):\n return self.assertFloatEqual(x.frac_similar(y, transitions), z)\n\n test(e, e, 0)\n test(s1, e, 0)\n test(s1, s1, 1)\n test(s1, s2, 7.0 / 8)\n test(s1, s3, 5.0 / 8)\n test(s2, s3, 4.0 / 8)",
"def test_insert_gaps_order_invariant():\n gaps1 = insert_gaps(log)\n gaps2 = insert_gaps(log.iloc[[1,0]])\n\n get_gaps = lambda x: x[x['name'] == 'gap']['length'].reset_index(drop=True)\n assert (get_gaps(gaps1) == get_gaps(gaps2.iloc[::-1])).all()",
"def extract_labeled_sequence_gaps(source_seq, test_seq):\n slot_vals = {} \n tmp_gap = []\n prev_word_pos = 0 # the temp value used as a key for the gaps\n pos_in_seq = 0 # position of source_seq of test_seq's current match\n for i, el in enumerate(test_seq):\n if (len(source_seq)-pos_in_seq > len(test_seq)-i) or (pos_in_seq == len(source_seq)):\n return {} \n if el == source_seq[pos_in_seq]:\n # match\n pos_in_seq += 1\n if pos_in_seq != 1 and len(tmp_gap) != 0:\n slot_vals[prev_word_pos] = tmp_gap\n tmp_gap = []\n prev_word_pos = i \n else:\n tmp_gap.append(el)\n if pos_in_seq == len(source_seq):\n return slot_vals\n return {}",
"def test_gaps(self):\n sc = self.SequenceClass\n self.assertEqual(sc(\"TC\").gaps(), array([0, 0]))\n self.assertEqual(sc(\"T-\").gaps(), array([0, 1]))",
"def gaps(self):\n return self.gaps_L + self.gaps_R",
"def test_frac_same(self):\n s1 = self.RNA(\"ACGU\")\n s2 = self.RNA(\"AACG\")\n s3 = self.RNA(\"GG\")\n s4 = self.RNA(\"A\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_same(e), 0)\n self.assertEqual(s1.frac_same(s2), 0.25)\n self.assertEqual(s1.frac_same(s3), 0)\n self.assertEqual(s1.frac_same(s4), 1.0) # note truncation",
"def calculateIndels(mfaPairs, regions):\n gapLength = 0\n gaps = [0]*1000\n for i in regions:\n for j in xrange(i[0], i[1]):\n k = mfaPairs[j]\n if k == GAP:\n gapLength += 1\n else:\n if gapLength != 0:\n gaps[gapLength] += 1\n gapLength = 0\n return gaps",
"def match_gc_content(pos_one_hot, neg_one_hot, neg_pos_ratio=1):\n N, L, A = pos_one_hot.shape\n gc_pos = np.sum(np.sum(pos_one_hot[:,:,[1,2]], axis=2), axis=1)/L\n gc_neg = np.sum(np.sum(neg_one_hot[:,:,[1,2]], axis=2), axis=1)/L\n print(' Average GC content for positive sequences: %.3f'%(np.mean(gc_pos)))\n print(' Average GC content for negative sequences: %.3f'%(np.mean(gc_neg)))\n\n pos_index = np.argsort(gc_pos)\n neg_index = np.argsort(gc_neg)\n num_neg = len(neg_index)\n num_pos = len(pos_index)\n\n match_index = []\n if num_neg > num_pos:\n k = 0\n status = True\n for i in pos_index:\n for j in range(k, num_neg):\n if gc_pos[i] < gc_neg[neg_index[j]]:\n if k > num_neg:\n status = False\n break\n else:\n # print(\"%.2f vs %.2f\"%(gc_pos[i], gc_neg[neg_index[j]]))\n match_index.append(neg_index[j])\n k = j+1\n break\n if not status:\n break\n\n remainder = int(num_pos*neg_pos_ratio) - len(match_index)\n print(' Found %d GC-matched sequences.'%(len(match_index)))\n if remainder > 0:\n print(' Adding %d more random negative sequences.'%(remainder))\n remain_index = np.array(list(set(range(num_neg)) - set(match_index)))\n index = np.random.permutation(len(remain_index))[:remainder] \n # index = np.argsort(gc_neg[remain_index])[::-1]\n for n in remain_index[index[:remainder]]:\n match_index.append(n)\n \n match_index = np.array(match_index)\n print(' Average GC content for sub-sampled negative sequences: %.3f'%(np.mean(gc_neg[match_index])))\n\n return neg_one_hot[match_index], match_index",
"def get_gaps( rows ):\n\n n = len(rows) - 1\n gaps = [ rows[i+1][0]-rows[i][1] for i in range(n) ]\n return gaps",
"def similar(a, b):\n return SequenceMatcher(None, a, b).ratio()",
"def similar(a, b):\n return SequenceMatcher(None, a, b).ratio()",
"def needleman_wunsch(\n seq1, seq2, match=1, mismatch=-1, gap_open=-5, gap_extend=-3, at_genome_start=False\n):\n alignments = pairwise2.align.globalms(\n seq1,\n seq2,\n match,\n mismatch,\n gap_open,\n gap_extend,\n )\n # Alignments is a list of tuples. Each tuple has length 5. Entries:\n # 0: seq1 alignment (ie with dashes for indels)\n # 1: seq2 alignemnt\n # 2: alignment score\n # 4, 5: don't know (not using them)\n if len(alignments) == 1:\n return alignments[0][0], alignments[0][1]\n\n if at_genome_start:\n best_pos = last_gap_end_in_string(alignments[0][1])\n else:\n best_pos = alignments[0][1].find(\"-\")\n\n best = alignments[0]\n\n for a in alignments[1:]:\n if at_genome_start:\n gap_pos = last_gap_end_in_string(a[1])\n else:\n gap_pos = a[1].find(\"-\")\n\n if gap_pos > best_pos:\n best = a\n best_pos = gap_pos\n\n return best[0], best[1]",
"def test_gap_maps(self):\n empty = \"\"\n no_gaps = \"aaa\"\n all_gaps = \"---\"\n start_gaps = \"--abc\"\n end_gaps = \"ab---\"\n mid_gaps = \"--a--b-cd---\"\n\n def gm(x):\n return self.RNA(x).gap_maps()\n\n self.assertEqual(gm(empty), ({}, {}))\n self.assertEqual(gm(no_gaps), ({0: 0, 1: 1, 2: 2}, {0: 0, 1: 1, 2: 2}))\n self.assertEqual(gm(all_gaps), ({}, {}))\n self.assertEqual(gm(start_gaps), ({0: 2, 1: 3, 2: 4}, {2: 0, 3: 1, 4: 2}))\n self.assertEqual(gm(end_gaps), ({0: 0, 1: 1}, {0: 0, 1: 1}))\n self.assertEqual(\n gm(mid_gaps), ({0: 2, 1: 5, 2: 7, 3: 8}, {2: 0, 5: 1, 7: 2, 8: 3})\n )",
"def compare_gene_predictors(GM_genes, Glim_genes):\n GM_starts = []\n Glim_starts = []\n GM_only = []\n Glim_only = []\n shared_starts = []\n # GM_stops = []\n # Glim_stops = []\n Glim_unique = 0\n GM_unique = 0\n\n for i in range(1,GM_genes[\"total genes\"]+1):\n GM_starts.append(GM_genes[\"gene\" + str(i)][\"start\"])\n for j in range(1,Glim_genes[\"total genes\"]+1):\n Glim_starts.append (Glim_genes[\"gene\"+ str(j)][\"start\"])\n for i in range(0,len(GM_starts)):\n if GM_starts[i] not in Glim_starts:\n print(\"start at pos. \" + str(GM_starts[i]) + \" is unique to GM genes\")\n GM_only.append(GM_starts[i])\n GM_unique += 1\n else:\n shared_starts.append(GM_starts[i])\n for j in range(0,len(Glim_starts)):\n if Glim_starts[j] not in GM_starts:\n print (\"start at pos. \" + str(Glim_starts[j]) + \" is unique to Glim genes\")\n Glim_only.append(Glim_starts[j])\n Glim_unique += 1\n else:\n if GM_starts[j] not in shared_starts:\n shared_starts.append(GM_starts[j])\n shared_starts.sort()\n print (\"Number of unique Glimmer starts = \" + str(Glim_unique))\n print (\"Number of unique GM starts = \" + str(GM_unique))\n print(\"Shared starts =\\n\")\n for k in range(0,len(shared_starts)):\n print (shared_starts[k])",
"def test_includinggaps(self):\n model = substitution_model.TimeReversibleNucleotide(model_gaps=True)\n assert len(model.get_alphabet()) == 5",
"def test_is_gap(self):\n r = self.RNA(\"ACGUCAGUACGUCAGNRCGAUcaguaguacYRNRYRN\")\n for char in \"qwertyuiopasdfghjklzxcvbnmQWERTYUIOASDFGHJKLZXCVBNM\":\n assert not r.is_gap(char)\n assert r.is_gap(\"-\")\n # only works on a single literal that's a gap, not on a sequence.\n # possibly, this behavior should change?\n assert not r.is_gap(\"---\")\n # check behaviour on self\n assert not self.RNA(\"CGAUACGUACGACU\").is_gap()\n assert not self.RNA(\"---CGAUA----CGUACG---ACU---\").is_gap()\n assert self.RNA(\"\").is_gap()\n assert self.RNA(\"----------\").is_gap()",
"def find_gaps(s, gapcode=45):\n return nonzero(fromstring(s,dtype=uint8) == gapcode)",
"def find_gaps(s, gapcode=45):\r\n return nonzero(fromstring(s, dtype=uint8) == gapcode)",
"def pos_gaps(df, gaps):\n nb_rows, nb_cols = df.shape\n\n value_counts = df.apply(pd.Series.value_counts, axis=0)#.max(axis=0).ge(conservation * nb_rows)\n\n ge = []\n for i in value_counts.columns:\n try:\n if value_counts[i]['-'] > nb_rows * gaps:\n ge.append(i)\n continue\n except:\n pass\n try:\n if value_counts[i]['.'] > nb_rows * gaps:\n ge.append(i)\n continue\n except:\n pass\n return ge",
"def findGaps(sequence):\n patt = re.compile(r\"[Nn]+\")\n for match in patt.finditer(sequence):\n yield (match.start(), match.end())",
"def test_is_gapped(self):\n assert not self.RNA(\"\").is_gapped()\n assert not self.RNA(\"ACGUCAGUACGUCAGNRCGAUcaguaguacYRNRYRN\").is_gapped()\n assert self.RNA(\"-\").is_gapped()\n assert self.PROT(\"--\").is_gapped()\n assert self.RNA(\"CAGUCGUACGUCAGUACGUacucauacgac-caguACUG\").is_gapped()\n assert self.RNA(\"CA--CGUAUGCA-----g\").is_gapped()\n assert self.RNA(\"CAGU-\").is_gapped()",
"def get_percent_identity(seq1, seq2, count_gaps=False):\n\n # Make sure the sequence content is a string\n seq1 = str(seq1)\n seq2 = str(seq2)\n\n # print (seq1)\n # print (seq2)\n\n matches = sum(aa1 == aa2 for aa1, aa2 in zip(seq1, seq2) if aa1 != \"-\" and aa2 != \"-\")\n\n # Set the length based on whether we want identity to count gaps or not\n # length = len(seq1) if count_gaps else min(len(seq1.replace(\"-\", \"\"))- seq2.count(\"-\"), len(seq2.replace(\"-\", \"\")) - seq1.count(\"-\"))\n if count_gaps:\n length = len(seq1)\n else:\n length = sum ([1 for (aa1, aa2) in zip(seq1, seq2) if aa1 != \"-\" and aa2 != \"-\"])\n\n # print ('matches ', matches)\n # print ('length ', length)\n\n pct_identity = 100.0 * matches / length\n\n return pct_identity",
"def calculateOverlap(mfaPairs, exons1, exons2):\n exonSize = 0\n noMatchToExons = 0\n noMatchToNonExons = 0\n noMatchToGapsInExons = 0\n noMatchToGapsNotInExons = 0\n \n p = -100000\n for i in exons1:\n exonSize += i[1] - i[0]\n for j in xrange(i[0], i[1]):\n k = mfaPairs[j]\n l = k\n if k == GAP:\n l = p \n for m in exons2:\n if(l >= m[0] and l < m[1]):\n if k == GAP:\n noMatchToGapsInExons += 1\n else:\n noMatchToExons += 1\n break\n else:\n if k == GAP:\n noMatchToGapsNotInExons += 1\n else:\n noMatchToNonExons += 1\n if k != GAP:\n p = k\n return (exonSize, noMatchToExons, noMatchToNonExons,\\\n noMatchToGapsInExons, noMatchToGapsNotInExons)",
"def get_subscore(seq1, seq2, matrix=matrix, gap_s=gap_s, gap_e=gap_e):\n score = 0\n gap = False\n for i in range(len(seq1)):\n pair = (seq1[i], seq2[i])\n if not gap:\n if '-' in pair:\n gap = True\n score += gap_s\n else:\n score += _blosum_match(pair, matrix)\n else:\n if '-' not in pair:\n gap = False\n score += _blosum_match(pair, matrix)\n else:\n score += gap_e\n return score",
"def testSeqMatch(self): # - - - - - - - - - - - - - - - - - - - - - - - - -\n\n for pair in self.string_pairs:\n\n approx_str_value = stringcmp.seqmatch(pair[0],pair[1])\n\n assert (isinstance(approx_str_value,float)), \\\n '\"SeqMatch\" does not return a floating point number for: '+ \\\n str(pair)\n\n assert (approx_str_value >= 0.0), \\\n '\"SeqMatch\" returns a negative number for: '+str(pair)\n\n assert (approx_str_value <= 1.0), \\\n '\"SeqMatch\" returns a number larger than 1.0 for: '+str(pair)\n\n approx_str_value_1 = stringcmp.seqmatch(pair[0],pair[1])\n approx_str_value_2 = stringcmp.seqmatch(pair[1],pair[0])\n\n assert (approx_str_value_1 == approx_str_value_2), \\\n '\"SeqMatch\" returns different values for pair and swapped ' + \\\n 'pair: '+str(pair)+': '+str(approx_str_value_1)+', '+ \\\n str(approx_str_value_2)\n\n # Check for value 1.0 if the strings are the same\n #\n if (pair[0] == pair[1]):\n\n assert (approx_str_value == 1.0), \\\n '\"SeqMatch\" does not return 1.0 if strings are equal: '+ \\\n str(pair)",
"def compare(seq1, seq2):\n if seq1 == seq2:\n return 1\n len_diff = len(seq1) / len(seq2)\n if len_diff > 1:\n len_diff = 1 / len_diff\n\n ngrams1 = {tuple(ng) for ng in get_all_ngrams(seq1)}\n ngrams2 = {tuple(ng) for ng in get_all_ngrams(seq2)}\n\n overall = len(ngrams1 & ngrams2) / len(ngrams1 | ngrams2)\n if overall == 1 or overall == 0:\n return overall\n\n try:\n max_match = len(max(ngrams1 & ngrams2, key=len)) / len(seq1)\n except ValueError:\n return 0\n\n return (len_diff + max_match + overall) / 3"
] | [
"0.7100668",
"0.68241644",
"0.6488444",
"0.6251183",
"0.622191",
"0.62168777",
"0.6174029",
"0.6057268",
"0.59166205",
"0.58478665",
"0.5809226",
"0.57599026",
"0.57473063",
"0.57302",
"0.57302",
"0.57115066",
"0.5697203",
"0.5636952",
"0.5636567",
"0.5633265",
"0.56134945",
"0.5611183",
"0.56058484",
"0.5591779",
"0.55889124",
"0.5587399",
"0.5519574",
"0.55155385",
"0.55116105",
"0.5502122"
] | 0.73995185 | 0 |
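
`frac_same_gaps` in the record above scores agreement on gap placement only: a position counts as "same" when both sequences have a gap or both have a non-gap character, again over the truncated length, with empty comparisons returning 0. A minimal sketch under those assumptions (the `gap="-"` parameter is an illustration, not the library's signature):

```python
def frac_same_gaps(seq1, seq2, gap="-"):
    """Fraction of aligned positions where both sequences agree on gap-ness."""
    pairs = list(zip(seq1, seq2))  # truncates to the shorter sequence
    if not pairs:
        return 0.0
    return sum((a == gap) == (b == gap) for a, b in pairs) / len(pairs)


assert frac_same_gaps("A-A-", "UU--") == 0.5
```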
Sequence frac_diff_gaps should return difference in gap positions | def test_frac_diffGaps(self):
s1 = self.RNA("AAAA")
s2 = self.RNA("GGGG")
s3 = self.RNA("----")
s4 = self.RNA("A-A-")
s5 = self.RNA("-G-G")
s6 = self.RNA("UU--")
s7 = self.RNA("-")
s8 = self.RNA("GGG")
e = self.RNA("")
self.assertEqual(s1.frac_diff_gaps(s1), 0)
self.assertEqual(s1.frac_diff_gaps(s2), 0)
self.assertEqual(s1.frac_diff_gaps(s3), 1)
self.assertEqual(s1.frac_diff_gaps(s4), 0.5)
self.assertEqual(s1.frac_diff_gaps(s5), 0.5)
self.assertEqual(s1.frac_diff_gaps(s6), 0.5)
self.assertEqual(s1.frac_diff_gaps(s7), 1)
self.assertEqual(s1.frac_diff_gaps(e), 0)
self.assertEqual(s3.frac_diff_gaps(s3), 0)
self.assertEqual(s3.frac_diff_gaps(s4), 0.5)
self.assertEqual(s3.frac_diff_gaps(s7), 0.0)
self.assertEqual(e.frac_diff_gaps(e), 0.0)
self.assertEqual(s4.frac_diff_gaps(s5), 1.0)
self.assertEqual(s4.frac_diff_gaps(s6), 0.5)
self.assertFloatEqual(s6.frac_diff_gaps(s8), 1 / 3.0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_frac_diffNonGaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"AGGG\")\n s3 = self.RNA(\"GGGG\")\n s4 = self.RNA(\"AG--GA-G\")\n s5 = self.RNA(\"CU--CU-C\")\n s6 = self.RNA(\"AC--GC-G\")\n s7 = self.RNA(\"--------\")\n s8 = self.RNA(\"AAAA----\")\n s9 = self.RNA(\"A-GG-A-C\")\n e = self.RNA(\"\")\n\n def test(x, y, z):\n return self.assertFloatEqual(x.frac_diff_non_gaps(y), z)\n\n test(s1, s2, 0.75)\n test(s1, s3, 1)\n test(s2, s3, 0.25)\n test(s1, s4, 0.5)\n test(s4, s5, 1)\n test(s4, s6, 0.4)\n test(s4, s7, 0)\n test(s4, s8, 0.5)\n test(s4, s9, 1 / 3.0)\n test(e, s4, 0)",
"def gaps(self):\n return self.gaps_L + self.gaps_R",
"def get_gaps( rows ):\n\n n = len(rows) - 1\n gaps = [ rows[i+1][0]-rows[i][1] for i in range(n) ]\n return gaps",
"def test_frac_same_gaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"GGGG\")\n s3 = self.RNA(\"----\")\n s4 = self.RNA(\"A-A-\")\n s5 = self.RNA(\"-G-G\")\n s6 = self.RNA(\"UU--\")\n s7 = self.RNA(\"-\")\n s8 = self.RNA(\"GGG\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_same_gaps(s1), 1)\n self.assertEqual(s1.frac_same_gaps(s2), 1)\n self.assertEqual(s1.frac_same_gaps(s3), 0)\n self.assertEqual(s1.frac_same_gaps(s4), 0.5)\n self.assertEqual(s1.frac_same_gaps(s5), 0.5)\n self.assertEqual(s1.frac_same_gaps(s6), 0.5)\n self.assertEqual(s1.frac_same_gaps(s7), 0)\n self.assertEqual(s1.frac_same_gaps(e), 0)\n self.assertEqual(s3.frac_same_gaps(s3), 1)\n self.assertEqual(s3.frac_same_gaps(s4), 0.5)\n self.assertEqual(s3.frac_same_gaps(s7), 1.0)\n self.assertEqual(e.frac_same_gaps(e), 0.0)\n self.assertEqual(s4.frac_same_gaps(s5), 0.0)\n self.assertEqual(s4.frac_same_gaps(s6), 0.5)\n self.assertFloatEqual(s6.frac_same_gaps(s8), 2 / 3.0)",
"def gaps(df):\n return [(round(df[i][\"o\"] - df[i - 1][\"c\"], 2)) for i in range(1, len(df))]",
"def calculateIndels(mfaPairs, regions):\n gapLength = 0\n gaps = [0]*1000\n for i in regions:\n for j in xrange(i[0], i[1]):\n k = mfaPairs[j]\n if k == GAP:\n gapLength += 1\n else:\n if gapLength != 0:\n gaps[gapLength] += 1\n gapLength = 0\n return gaps",
"def test_frac_same_non_gaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"AGGG\")\n s3 = self.RNA(\"GGGG\")\n s4 = self.RNA(\"AG--GA-G\")\n s5 = self.RNA(\"CU--CU-C\")\n s6 = self.RNA(\"AC--GC-G\")\n s7 = self.RNA(\"--------\")\n s8 = self.RNA(\"AAAA----\")\n s9 = self.RNA(\"A-GG-A-C\")\n e = self.RNA(\"\")\n\n def test(x, y, z):\n return self.assertFloatEqual(x.frac_same_non_gaps(y), z)\n\n test(s1, s2, 0.25)\n test(s1, s3, 0)\n test(s2, s3, 0.75)\n test(s1, s4, 0.5)\n test(s4, s5, 0)\n test(s4, s6, 0.6)\n test(s4, s7, 0)\n test(s4, s8, 0.5)\n test(s4, s9, 2 / 3.0)\n test(e, s4, 0)",
"def test_gaps(self):\n sc = self.SequenceClass\n self.assertEqual(sc(\"TC\").gaps(), array([0, 0]))\n self.assertEqual(sc(\"T-\").gaps(), array([0, 1]))",
"def check_gaps(matches, gap_threshold = 0):\n gaps = []\n prev = None\n for match in sorted(matches, key = itemgetter(0)):\n if prev is None:\n prev = match\n continue\n if match[0] - prev[1] >= gap_threshold:\n gaps.append([prev, match])\n prev = match\n return [[i[0][1], i[1][0]] for i in gaps]",
"def pos_gaps(df, gaps):\n nb_rows, nb_cols = df.shape\n\n value_counts = df.apply(pd.Series.value_counts, axis=0)#.max(axis=0).ge(conservation * nb_rows)\n\n ge = []\n for i in value_counts.columns:\n try:\n if value_counts[i]['-'] > nb_rows * gaps:\n ge.append(i)\n continue\n except:\n pass\n try:\n if value_counts[i]['.'] > nb_rows * gaps:\n ge.append(i)\n continue\n except:\n pass\n return ge",
"def test_insert_gaps_order_invariant():\n gaps1 = insert_gaps(log)\n gaps2 = insert_gaps(log.iloc[[1,0]])\n\n get_gaps = lambda x: x[x['name'] == 'gap']['length'].reset_index(drop=True)\n assert (get_gaps(gaps1) == get_gaps(gaps2.iloc[::-1])).all()",
"def get_interval_list_predefined_gap(traces_list, gap_interval):\n\n intv = 0\n interval_list = []\n pre_traces = []\n\n for timst in traces_list:\n timst = timst.replace(microsecond=0)\n pre_traces.append(timst)\n\n for i in range(0, len(pre_traces)-1):\n iat = (pre_traces[i+1]-pre_traces[i]).total_seconds()\n if iat <= gap_interval:\n current_trace = pre_traces[i]\n while current_trace < pre_traces[i+1]:\n interval_list.append(current_trace)\n current_trace = current_trace + datetime.timedelta(0,1)\n else:\n interval_list.append(pre_traces[i])\n\n if i == len(pre_traces)-2:\n interval_list.append(pre_traces[i+1])\n\n return interval_list",
"def detect_time_gaps(st, min_samples=10, epsilon=1e-20, thresh_disc=100):\n # Read data\n tdata = st[0].data\n indz = np.where(abs(tdata) < epsilon)[0] # indices where we have 0\n diff_indz = indz[min_samples:] - indz[0:-min_samples] # Need min_samples consecutive samples with 0's to identify as time gap\n ind_des = np.where(diff_indz == min_samples)[0] # desired indices: value is equal to min_samples in the time gap\n ind_gap = indz[ind_des] # indices of the time gaps\n gap_start_ind = []\n gap_end_ind = []\n if (0 == len(ind_gap)): \n num_gaps = 0\n else:\n print \"Warning: %s time gap(s) with zeros found\"%len(ind_gap)\n # May have more than 1 time gap\n ind_diff = np.diff(ind_gap) # discontinuities in indices of the time gaps, if there is more than 1 time gap\n ind_disc = np.where(ind_diff > thresh_disc)[0]\n # N-1 time gaps\n curr_ind_start = ind_gap[0]\n for igap in range(len(ind_disc)): # do not enter this loop if ind_disc is empty\n gap_start_ind.append(curr_ind_start)\n last_index = ind_gap[ind_disc[igap]] + min_samples\n gap_end_ind.append(last_index)\n curr_ind_start = ind_gap[ind_disc[igap]+1] # update for next iteration\n # Last time gap\n gap_start_ind.append(curr_ind_start)\n gap_end_ind.append(ind_gap[-1] + min_samples)\n num_gaps = len(gap_start_ind)\n\n return [num_gaps, gap_start_ind, gap_end_ind]",
"def findGaps(sequence):\n patt = re.compile(r\"[Nn]+\")\n for match in patt.finditer(sequence):\n yield (match.start(), match.end())",
"def extract_labeled_sequence_gaps(source_seq, test_seq):\n slot_vals = {} \n tmp_gap = []\n prev_word_pos = 0 # the temp value used as a key for the gaps\n pos_in_seq = 0 # position of source_seq of test_seq's current match\n for i, el in enumerate(test_seq):\n if (len(source_seq)-pos_in_seq > len(test_seq)-i) or (pos_in_seq == len(source_seq)):\n return {} \n if el == source_seq[pos_in_seq]:\n # match\n pos_in_seq += 1\n if pos_in_seq != 1 and len(tmp_gap) != 0:\n slot_vals[prev_word_pos] = tmp_gap\n tmp_gap = []\n prev_word_pos = i \n else:\n tmp_gap.append(el)\n if pos_in_seq == len(source_seq):\n return slot_vals\n return {}",
"def gap_tracer(df: pd.DataFrame, runs: int = 6, gap_freq: int = 1) -> pd.DataFrame:\n df = df.copy()\n df[\"timestamp\"] = df.index\n df[\"gap\"] = df.timestamp.diff()\n df[\"gap_bool\"] = df[\"gap\"] > df[\"gap\"].mode()[0]\n df[\"from\"] = df[\"timestamp\"].shift()\n # all gaps in timeseries\n gaps = df[df[\"gap_bool\"]]\n\n # non standard gaps\n out = pd.DataFrame({\"from\": gaps[\"from\"], \"to\": gaps[\"timestamp\"]}).reset_index(\n drop=True\n )\n out[\"duration\"] = out[\"to\"] - out[\"from\"]\n out = out[1:]\n\n out[\"from_time\"] = out[\"from\"].apply(lambda x: x.time())\n\n # most frequent time cutoff (end of day)\n def time_cut(df: pd.DataFrame) -> Tuple[pd.DataFrame, pd.Timestamp]:\n df = df.copy()\n cutoff_time = df[\"from_time\"].mode()[0]\n gapless = df[(df[\"from_time\"] != cutoff_time)].reset_index(drop=True)\n return gapless, cutoff_time\n\n # non standard gap duration based on frequency for every time cutoff\n def duration_cut(cf: pd.Timestamp) -> pd.DataFrame:\n duration_counts = out[out[\"from_time\"] == cf].duration.value_counts()\n duration_count_thresholds = set(\n duration_counts[duration_counts > gap_freq].index\n )\n suspicious = out[\n (out[\"from_time\"] == cf)\n & out[\"duration\"].apply(lambda x: x not in duration_count_thresholds)\n ].reset_index(drop=True)\n return suspicious\n\n cutoffs = []\n\n non_standard_gaps = out\n\n for _ in range(runs):\n try:\n non_standard_gaps, cutoff = time_cut(non_standard_gaps)\n cutoffs.append(cutoff)\n except KeyError:\n break\n\n suspicious = [duration_cut(cf) for cf in cutoffs]\n suspicious.append(non_standard_gaps)\n out_df = pd.concat(suspicious).sort_values(\"from\").reset_index(drop=True)\n del out_df[\"from_time\"]\n\n return out_df",
"def gaps(args):\n from jcvi.formats.base import DictFile\n from jcvi.apps.base import popen\n from jcvi.utils.cbook import percentage\n\n p = OptionParser(gaps.__doc__)\n p.add_option(\"--bdist\", default=0, type=\"int\", help=\"Base pair distance\")\n opts, args = p.parse_args(args)\n\n if len(args) != 3:\n sys.exit(not p.print_help())\n\n idsfile, frfile, gapsbed = args\n bdist = opts.bdist\n d = DictFile(frfile, keypos=1, valuepos=2)\n bedfile = idsfile + \".bed\"\n fw = open(bedfile, \"w\")\n fp = open(idsfile)\n total = 0\n for row in fp:\n id = row.strip()\n hit = d[id]\n tag, pos = get_tag(hit, None)\n seqid, start, end = pos\n start, end = max(start - bdist, 1), end + bdist\n print(\"\\t\".join(str(x) for x in (seqid, start - 1, end, id)), file=fw)\n total += 1\n fw.close()\n\n cmd = \"intersectBed -a {0} -b {1} -v | wc -l\".format(bedfile, gapsbed)\n not_in_gaps = popen(cmd).read()\n not_in_gaps = int(not_in_gaps)\n in_gaps = total - not_in_gaps\n print(\"Ids in gaps: {1}\".format(total, percentage(in_gaps, total)), file=sys.stderr)",
"def compute_intertap_gap(intervals):\n import numpy as np\n\n n = len(intervals)\n\n fast10 = intervals[0:np.round(0.10 * n)]\n fast25 = intervals[0:np.round(0.25 * n)]\n fast50 = intervals[0:np.round(0.50 * n)]\n slow10 = intervals[n - np.round(0.10 * n):n]\n slow25 = intervals[n - np.round(0.25 * n):n]\n slow50 = intervals[n - np.round(0.50 * n):n]\n\n delta10 = np.mean(fast10) - np.mean(slow10)\n delta25 = np.mean(fast25) - np.mean(slow25)\n delta50 = np.mean(fast50) - np.mean(slow50)\n\n return delta10, delta25, delta50",
"def seq_numbers_diff(start_seq: int, end_seq: int) -> int:\n if start_seq < 0 or end_seq < 0:\n return None\n if start_seq > end_seq:\n return end_seq + (SEQ_NUM_MOD_CONST - start_seq)\n else:\n return end_seq - start_seq",
"def test_frac_diff(self):\n s1 = self.RNA(\"ACGU\")\n s2 = self.RNA(\"AACG\")\n s3 = self.RNA(\"GG\")\n s4 = self.RNA(\"A\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_diff(e), 0)\n self.assertEqual(s1.frac_diff(s2), 0.75)\n self.assertEqual(s1.frac_diff(s3), 1)\n self.assertEqual(s1.frac_diff(s4), 0) # note truncation",
"def prime_gap_plots(maxp, gap_sizes):\n P = prime_range(maxp + 1)\n v = [[(0, 0)] for i in gap_sizes]\n k = dict([(g, i) for i, g in enumerate(gap_sizes)])\n for i in range(len(P) - 1):\n g = P[i + 1] - P[i]\n if g in k:\n w = v[k[g]]\n w.append((P[i + 1], w[-1][1]))\n w.append((P[i + 1], w[-1][1] + 1))\n return v",
"def gap_length(L_t=79.6, p=75, fmax=1e12, p1=database['K+'],\r\n p2=database['pi+'], p3=database['p+'], l=2.74,\r\n E=1e6, plot=True, nf=200, delta_p=1.6e-2, n=100,\r\n just_pi=False, set_freq=5.7e9):\r\n gap_length = np.linspace(0, L_t-(2*l), n)[:-1]\r\n min_disp, freq = [], []\r\n for g in gap_length:\r\n if set_freq == None:\r\n f = freq_defl_comp(fmax, p, p1, p2, p3, L_t, l, E, plot=False,\r\n details=False, n=nf, delta_p=delta_p, fmin=0,\r\n just_pi=just_pi)\r\n else:\r\n f = set_freq\r\n freq.append(f)\r\n if just_pi == True:\r\n d2 = ang_deflection(p, f, p1, p2, L_t-g, l, E, delta_p=delta_p)\r\n min_disp.append(abs_deflection(d2, g))\r\n if just_pi == False: \r\n d2 = ang_deflection(p, f, p1, p2, L_t-g, l, E, delta_p=delta_p)\r\n d3 = ang_deflection(p, f, p1, p3, L_t-g, l, E, delta_p=delta_p)\r\n disp_2 = abs_deflection(d2, g)\r\n disp_3 = abs_deflection(d3, g)\r\n min_disp.append(np.min([disp_2, disp_3]))\r\n freq = np.array(freq)\r\n min_disp = np.array(min_disp)\r\n ratio = min_disp/freq\r\n ratio *= np.max(freq)/np.max(ratio)\r\n opt_freq_gap_index = np.argmax(ratio)\r\n opt_freq_gap_disp = [freq[opt_freq_gap_index], gap_length[opt_freq_gap_index], min_disp[opt_freq_gap_index]]\r\n if plot == True: \r\n fig = plt.figure(figsize=[9, 5])\r\n ax1 = fig.add_subplot(1, 1, 1)\r\n line1 = ax1.plot(gap_length, min_disp, 'r', alpha=0.5, label=f'minimum displacement')\r\n ax2 = ax1.twinx()\r\n line2 = ax2.plot(gap_length, freq, 'b', alpha=0.5, label=f'optimum frequency')\r\n line3 = ax2.plot(gap_length, ratio, 'g', alpha=0.5, label=f'ratio')\r\n ax1.set_xlabel('Gap Length / m', fontsize=20)\r\n ax1.set_xlim(0, L_t-(2*l))\r\n text = r'Minimum $D_{'\r\n text += p2.name[:-1] + '/' + p3.name[:-1]\r\n text += '}$ / mm'\r\n ax1.set_ylabel(text, fontsize=20, color='r')\r\n ax1.tick_params(axis='y', labelcolor='r')\r\n ax2.set_ylabel('Frequency / Hz', fontsize=20, color='b', alpha=1)\r\n ax2.tick_params(axis='y', labelcolor='b')\r\n ax1.set_ylim(0)\r\n ax2.set_ylim(0)\r\n leg = line1 + line2 + line3\r\n labs = [l.get_label() for l in leg]\r\n ax1.legend(leg, labs, loc=0, fontsize=12)\r\n fig.tight_layout()\r\n plt.show()\r\n return opt_freq_gap_disp",
"def fraction(amount, start, stop, truncated, sequence):\n ratio = stop\n for x in range(start, amount):\n y = abs(round(ratio / (abs(x) + 1)))\n if truncated and y >= stop:\n sequence.append(stop)\n elif y < start:\n sequence.append(start)\n else:\n sequence.append(y)\n return sequence",
"def ungap_feature_ends(feat, rec):\n if feat.location.start < 0:\n feat.location = FeatureLocation(0, feat.location.end, feat.location.strand)\n\n if feat.location.end < 0:\n feat.location = FeatureLocation(feat.location.start, 0, feat.location.strand)\n\n if feat.location.start > feat.location.end:\n feat.location = FeatureLocation(feat.location.end, feat.location.start, feat.location.strand)\n\n if type(feat.location) == CompoundLocation:\n parts = []\n for part in feat.location.parts:\n part = ungap_feature_ends(SeqFeature(part), rec)\n parts.append(part.location)\n feat.location = CompoundLocation(parts, feat.location.operator)\n\n elif type(feat.location) == FeatureLocation:\n extract = str(feat.extract(rec.seq))\n front_gaps = re.search(\"^-+\", extract)\n if front_gaps:\n if not feat.location.strand or feat.location.strand == 1:\n new_start = feat.location.start + len(front_gaps.group(0))\n feat.location = FeatureLocation(new_start, feat.location.end, 1)\n else:\n new_end = feat.location.end - len(front_gaps.group(0))\n feat.location = FeatureLocation(feat.location.start, new_end, -1)\n\n rear_gaps = re.search(\"-+$\", extract)\n if rear_gaps:\n if not feat.location.strand or feat.location.strand == 1:\n new_end = feat.location.end - len(rear_gaps.group(0))\n feat.location = FeatureLocation(feat.location.start, new_end, 1)\n else:\n new_start = feat.location.start + len(rear_gaps.group(0))\n feat.location = FeatureLocation(new_start, feat.location.end, -1)\n else:\n raise TypeError(\"FeatureLocation or CompoundLocation object required.\")\n return feat",
"def test_gap_vector(self):\n\n def g(x):\n return self.RNA(x).gap_vector()\n\n self.assertEqual(g(\"\"), [])\n self.assertEqual(g(\"ACUGUCAGUACGHCSDKCCUCCDNCNS\"), [False] * 27)\n self.assertEqual(\n g(\"GUACGUAACAKADC-SDAHADSAK\"),\n list(map(bool, list(map(int, \"000000000000001000000000\")))),\n )\n self.assertEqual(g(\"-DSHSUHDSS\"), list(map(bool, list(map(int, \"1000000000\")))))\n self.assertEqual(\n g(\"UACHASCAGDS-\"), list(map(bool, list(map(int, \"000000000001\"))))\n )\n self.assertEqual(\n g(\"---CGAUgCAU---ACGHc---ACGUCAGU--?\"),\n list(map(bool, list(map(int, \"111000000001110000011100000000111\")))),\n )",
"def diff_flag(data):\n dp = np.abs(np.diff(data))\n dp = np.concatenate(([0], dp))\n return dp",
"def gap(l):\n if l < 3:\n return 0\n\n # places one person in the middle of the gap,\n # and starts over on the new smaller gaps on either side.\n return gap(int(l / 2)) + 1 + gap(ceil(l / 2) - 1)",
"def frac_positions():\n return [f\"{x},{y}\" for x in range(0, 15, 4) for y in range(0, 15, 4) if x != 0 or y != 0]",
"def gaps(args):\n from jcvi.formats.sizes import agp\n from jcvi.formats.agp import mask, build\n\n p = OptionParser(gaps.__doc__)\n p.add_option(\n \"--split\", default=False, action=\"store_true\", help=\"Generate .split.fasta\"\n )\n p.set_mingap(default=100)\n p.set_cpus()\n opts, args = p.parse_args(args)\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n (inputfasta,) = args\n mingap = opts.mingap\n split = opts.split\n prefix = inputfasta.rsplit(\".\", 1)[0]\n bedfile = prefix + \".gaps.bed\"\n\n if need_update(inputfasta, bedfile):\n write_gaps_bed(inputfasta, prefix, mingap, opts.cpus)\n\n if split:\n splitfile = prefix + \".split.fasta\"\n oagpfile = prefix + \".splitobject.agp\"\n cagpfile = prefix + \".splitcomponent.agp\"\n\n if need_update((inputfasta, bedfile), splitfile):\n\n sizesagpfile = agp([inputfasta])\n\n maskedagpfile = mask([sizesagpfile, bedfile, \"--splitobject\"])\n shutil.move(maskedagpfile, oagpfile)\n logging.debug(\"AGP file written to `{0}`.\".format(oagpfile))\n\n maskedagpfile = mask([sizesagpfile, bedfile, \"--splitcomponent\"])\n shutil.move(maskedagpfile, cagpfile)\n logging.debug(\"AGP file written to `{0}`.\".format(cagpfile))\n\n build([oagpfile, inputfasta, splitfile])\n cleanup(sizesagpfile)\n\n return splitfile, oagpfile, cagpfile",
"def throw_random_gap_list(lengths, mask, save_interval_func, allow_overlap=False):\n # Use mask to find the gaps; gaps is a list of (length,start,end)\n lengths = [length for length in lengths if length > 0]\n min_length = min(lengths)\n gaps = []\n start = end = 0\n while True:\n start = mask.next_clear(end)\n if start == mask.size:\n break\n end = mask.next_set(start)\n if end - start >= min_length:\n gaps.append((end - start, start, None))\n # Sort (long regions first)\n gaps.sort()\n gaps.reverse()\n # Throw\n throw_random_private(lengths, gaps, save_interval_func, allow_overlap, three_args=False)"
] | [
"0.7225307",
"0.67301106",
"0.66909367",
"0.6637069",
"0.63543224",
"0.63248473",
"0.625972",
"0.6218654",
"0.621057",
"0.61792654",
"0.6112803",
"0.6037798",
"0.5851911",
"0.58475983",
"0.58424854",
"0.5677977",
"0.5675696",
"0.56663543",
"0.5583191",
"0.5574306",
"0.5514928",
"0.55134344",
"0.5474972",
"0.53353125",
"0.53298944",
"0.5303647",
"0.5300291",
"0.52988833",
"0.52941024",
"0.5286232"
] | 0.73904467 | 0 |
Sequence frac_same_non_gaps should return similarities at nongaps | def test_frac_same_non_gaps(self):
s1 = self.RNA("AAAA")
s2 = self.RNA("AGGG")
s3 = self.RNA("GGGG")
s4 = self.RNA("AG--GA-G")
s5 = self.RNA("CU--CU-C")
s6 = self.RNA("AC--GC-G")
s7 = self.RNA("--------")
s8 = self.RNA("AAAA----")
s9 = self.RNA("A-GG-A-C")
e = self.RNA("")
def test(x, y, z):
return self.assertFloatEqual(x.frac_same_non_gaps(y), z)
test(s1, s2, 0.25)
test(s1, s3, 0)
test(s2, s3, 0.75)
test(s1, s4, 0.5)
test(s4, s5, 0)
test(s4, s6, 0.6)
test(s4, s7, 0)
test(s4, s8, 0.5)
test(s4, s9, 2 / 3.0)
test(e, s4, 0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_frac_same_gaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"GGGG\")\n s3 = self.RNA(\"----\")\n s4 = self.RNA(\"A-A-\")\n s5 = self.RNA(\"-G-G\")\n s6 = self.RNA(\"UU--\")\n s7 = self.RNA(\"-\")\n s8 = self.RNA(\"GGG\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_same_gaps(s1), 1)\n self.assertEqual(s1.frac_same_gaps(s2), 1)\n self.assertEqual(s1.frac_same_gaps(s3), 0)\n self.assertEqual(s1.frac_same_gaps(s4), 0.5)\n self.assertEqual(s1.frac_same_gaps(s5), 0.5)\n self.assertEqual(s1.frac_same_gaps(s6), 0.5)\n self.assertEqual(s1.frac_same_gaps(s7), 0)\n self.assertEqual(s1.frac_same_gaps(e), 0)\n self.assertEqual(s3.frac_same_gaps(s3), 1)\n self.assertEqual(s3.frac_same_gaps(s4), 0.5)\n self.assertEqual(s3.frac_same_gaps(s7), 1.0)\n self.assertEqual(e.frac_same_gaps(e), 0.0)\n self.assertEqual(s4.frac_same_gaps(s5), 0.0)\n self.assertEqual(s4.frac_same_gaps(s6), 0.5)\n self.assertFloatEqual(s6.frac_same_gaps(s8), 2 / 3.0)",
"def test_frac_diffNonGaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"AGGG\")\n s3 = self.RNA(\"GGGG\")\n s4 = self.RNA(\"AG--GA-G\")\n s5 = self.RNA(\"CU--CU-C\")\n s6 = self.RNA(\"AC--GC-G\")\n s7 = self.RNA(\"--------\")\n s8 = self.RNA(\"AAAA----\")\n s9 = self.RNA(\"A-GG-A-C\")\n e = self.RNA(\"\")\n\n def test(x, y, z):\n return self.assertFloatEqual(x.frac_diff_non_gaps(y), z)\n\n test(s1, s2, 0.75)\n test(s1, s3, 1)\n test(s2, s3, 0.25)\n test(s1, s4, 0.5)\n test(s4, s5, 1)\n test(s4, s6, 0.4)\n test(s4, s7, 0)\n test(s4, s8, 0.5)\n test(s4, s9, 1 / 3.0)\n test(e, s4, 0)",
"def test_frac_diffGaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"GGGG\")\n s3 = self.RNA(\"----\")\n s4 = self.RNA(\"A-A-\")\n s5 = self.RNA(\"-G-G\")\n s6 = self.RNA(\"UU--\")\n s7 = self.RNA(\"-\")\n s8 = self.RNA(\"GGG\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_diff_gaps(s1), 0)\n self.assertEqual(s1.frac_diff_gaps(s2), 0)\n self.assertEqual(s1.frac_diff_gaps(s3), 1)\n self.assertEqual(s1.frac_diff_gaps(s4), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s5), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s6), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s7), 1)\n self.assertEqual(s1.frac_diff_gaps(e), 0)\n self.assertEqual(s3.frac_diff_gaps(s3), 0)\n self.assertEqual(s3.frac_diff_gaps(s4), 0.5)\n self.assertEqual(s3.frac_diff_gaps(s7), 0.0)\n self.assertEqual(e.frac_diff_gaps(e), 0.0)\n self.assertEqual(s4.frac_diff_gaps(s5), 1.0)\n self.assertEqual(s4.frac_diff_gaps(s6), 0.5)\n self.assertFloatEqual(s6.frac_diff_gaps(s8), 1 / 3.0)",
"def test_frac_similar(self):\n transitions = dict.fromkeys(\n [\n (\"A\", \"A\"),\n (\"A\", \"G\"),\n (\"G\", \"A\"),\n (\"G\", \"G\"),\n (\"U\", \"U\"),\n (\"U\", \"C\"),\n (\"C\", \"U\"),\n (\"C\", \"C\"),\n ]\n )\n\n s1 = self.RNA(\"UCAGGCAA\")\n s2 = self.RNA(\"CCAAAUGC\")\n s3 = self.RNA(\"GGGGGGGG\")\n e = self.RNA(\"\")\n\n def test(x, y, z):\n return self.assertFloatEqual(x.frac_similar(y, transitions), z)\n\n test(e, e, 0)\n test(s1, e, 0)\n test(s1, s1, 1)\n test(s1, s2, 7.0 / 8)\n test(s1, s3, 5.0 / 8)\n test(s2, s3, 4.0 / 8)",
"def test_nongaps(self):\n sc = self.SequenceClass\n self.assertEqual(sc(\"TC\").nongaps(), array([1, 1]))\n self.assertEqual(sc(\"T-\").nongaps(), array([1, 0]))",
"def test_frac_same(self):\n s1 = self.RNA(\"ACGU\")\n s2 = self.RNA(\"AACG\")\n s3 = self.RNA(\"GG\")\n s4 = self.RNA(\"A\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_same(e), 0)\n self.assertEqual(s1.frac_same(s2), 0.25)\n self.assertEqual(s1.frac_same(s3), 0)\n self.assertEqual(s1.frac_same(s4), 1.0) # note truncation",
"def test_excludeinggaps(self):\n model = substitution_model.TimeReversibleNucleotide(model_gaps=False)\n assert len(model.get_alphabet()) == 4",
"def test_includinggaps(self):\n model = substitution_model.TimeReversibleNucleotide(model_gaps=True)\n assert len(model.get_alphabet()) == 5",
"def compare_gene_predictors(GM_genes, Glim_genes):\n GM_starts = []\n Glim_starts = []\n GM_only = []\n Glim_only = []\n shared_starts = []\n # GM_stops = []\n # Glim_stops = []\n Glim_unique = 0\n GM_unique = 0\n\n for i in range(1,GM_genes[\"total genes\"]+1):\n GM_starts.append(GM_genes[\"gene\" + str(i)][\"start\"])\n for j in range(1,Glim_genes[\"total genes\"]+1):\n Glim_starts.append (Glim_genes[\"gene\"+ str(j)][\"start\"])\n for i in range(0,len(GM_starts)):\n if GM_starts[i] not in Glim_starts:\n print(\"start at pos. \" + str(GM_starts[i]) + \" is unique to GM genes\")\n GM_only.append(GM_starts[i])\n GM_unique += 1\n else:\n shared_starts.append(GM_starts[i])\n for j in range(0,len(Glim_starts)):\n if Glim_starts[j] not in GM_starts:\n print (\"start at pos. \" + str(Glim_starts[j]) + \" is unique to Glim genes\")\n Glim_only.append(Glim_starts[j])\n Glim_unique += 1\n else:\n if GM_starts[j] not in shared_starts:\n shared_starts.append(GM_starts[j])\n shared_starts.sort()\n print (\"Number of unique Glimmer starts = \" + str(Glim_unique))\n print (\"Number of unique GM starts = \" + str(GM_unique))\n print(\"Shared starts =\\n\")\n for k in range(0,len(shared_starts)):\n print (shared_starts[k])",
"def test_ne():\n # Define some universal gsps\n gsp = galsim.GSParams(maxk_threshold=1.1e-3, folding_threshold=5.1e-3)\n\n # Pixel. Params include scale, flux, gsparams.\n # gsparams.\n # The following should all test unequal:\n gals = [galsim.Pixel(scale=1.0),\n galsim.Pixel(scale=1.1),\n galsim.Pixel(scale=1.0, flux=1.1),\n galsim.Pixel(scale=1.0, gsparams=gsp)]\n all_obj_diff(gals)\n\n # Box. Params include width, height, flux, gsparams.\n # gsparams.\n # The following should all test unequal:\n gals = [galsim.Box(width=1.0, height=1.0),\n galsim.Box(width=1.1, height=1.0),\n galsim.Box(width=1.0, height=1.1),\n galsim.Box(width=1.0, height=1.0, flux=1.1),\n galsim.Box(width=1.0, height=1.0, gsparams=gsp)]\n all_obj_diff(gals)\n\n # TopHat. Params include radius, flux, gsparams.\n # gsparams.\n # The following should all test unequal:\n gals = [galsim.TopHat(radius=1.0),\n galsim.TopHat(radius=1.1),\n galsim.TopHat(radius=1.0, flux=1.1),\n galsim.TopHat(radius=1.0, gsparams=gsp)]\n all_obj_diff(gals)",
"def match_gc_content(pos_one_hot, neg_one_hot, neg_pos_ratio=1):\n N, L, A = pos_one_hot.shape\n gc_pos = np.sum(np.sum(pos_one_hot[:,:,[1,2]], axis=2), axis=1)/L\n gc_neg = np.sum(np.sum(neg_one_hot[:,:,[1,2]], axis=2), axis=1)/L\n print(' Average GC content for positive sequences: %.3f'%(np.mean(gc_pos)))\n print(' Average GC content for negative sequences: %.3f'%(np.mean(gc_neg)))\n\n pos_index = np.argsort(gc_pos)\n neg_index = np.argsort(gc_neg)\n num_neg = len(neg_index)\n num_pos = len(pos_index)\n\n match_index = []\n if num_neg > num_pos:\n k = 0\n status = True\n for i in pos_index:\n for j in range(k, num_neg):\n if gc_pos[i] < gc_neg[neg_index[j]]:\n if k > num_neg:\n status = False\n break\n else:\n # print(\"%.2f vs %.2f\"%(gc_pos[i], gc_neg[neg_index[j]]))\n match_index.append(neg_index[j])\n k = j+1\n break\n if not status:\n break\n\n remainder = int(num_pos*neg_pos_ratio) - len(match_index)\n print(' Found %d GC-matched sequences.'%(len(match_index)))\n if remainder > 0:\n print(' Adding %d more random negative sequences.'%(remainder))\n remain_index = np.array(list(set(range(num_neg)) - set(match_index)))\n index = np.random.permutation(len(remain_index))[:remainder] \n # index = np.argsort(gc_neg[remain_index])[::-1]\n for n in remain_index[index[:remainder]]:\n match_index.append(n)\n \n match_index = np.array(match_index)\n print(' Average GC content for sub-sampled negative sequences: %.3f'%(np.mean(gc_neg[match_index])))\n\n return neg_one_hot[match_index], match_index",
"def check_gapped(sequence):\n w_regexp = re.compile('n|N')\n regexp_obj = w_regexp.search(sequence)\n if (regexp_obj):\n return True\n else:\n return False",
"def similar(a, b):\n return SequenceMatcher(None, a, b).ratio()",
"def similar(a, b):\n return SequenceMatcher(None, a, b).ratio()",
"def findGaps(sequence):\n patt = re.compile(r\"[Nn]+\")\n for match in patt.finditer(sequence):\n yield (match.start(), match.end())",
"def all_but_n_ver(seq, n):\r\n num_AnotB = 0\r\n for item in seq:\r\n if np.array_equal(item, Quantifier.AnotB):\r\n num_AnotB += 1\r\n return Quantifier.T if num_AnotB == n else Quantifier.F",
"def get_similarities(tags):\n return [(a, b)\n for (a, b) in itertools.permutations(tags, 2)\n if difflib.SequenceMatcher(a=a.lower(), b=b.lower()).ratio() > SIMILAR\n and a != b\n and b.endswith('s')]",
"def similarparts(imagparts):\n dupl = []\n global opt\n l = len(imagparts[0])-1\n \n for i in range(len(imagparts)-1): \n difs = sum(abs(x-y) for x,y in zip(imagparts[i][:l],imagparts[i+1][:l]))\n mean = float(sum(imagparts[i][:l])) / l\n dev = float(sum(abs(mean-val) for val in imagparts[i][:l])) / l\n if dev/mean >= float(opt.blcoldev):\n if difs <= int(opt.blsim):\n if imagparts[i] not in dupl:\n dupl.append(imagparts[i])\n if imagparts[i+1] not in dupl:\n dupl.append(imagparts[i+1])\n\n return dupl",
"def equal_number_ver(seq):\r\n num_AB, num_AnotB, num_BnotA = 0, 0, 0\r\n for item in seq:\r\n if np.array_equal(item, Quantifier.AB):\r\n num_AB += 1\r\n elif np.array_equal(item, Quantifier.AnotB):\r\n num_AnotB += 1\r\n elif np.array_equal(item, Quantifier.BnotA):\r\n num_BnotA += 1\r\n return Quantifier.T if num_AnotB == num_BnotA else Quantifier.F",
"def test_insert_gaps_order_invariant():\n gaps1 = insert_gaps(log)\n gaps2 = insert_gaps(log.iloc[[1,0]])\n\n get_gaps = lambda x: x[x['name'] == 'gap']['length'].reset_index(drop=True)\n assert (get_gaps(gaps1) == get_gaps(gaps2.iloc[::-1])).all()",
"def similar(g1, g2):\r\n return all(t1 == t2 for (t1, t2) in _squashed_graphs_triples(g1, g2))",
"def notall_ver(seq):\r\n for item in seq:\r\n if np.array_equal(item, Quantifier.AnotB):\r\n return Quantifier.T\r\n return Quantifier.F",
"def naive_2mm(p, t):\n\toccurence = []\n\tfor i in range(len(t)-len(p) + 1):\n\t\tmatch = True\n\t\tunmatch = 0\n\t\tfor j in range(len(p)):\n\t\t\tif not p[j] == t[i+j]:\n\t\t\t\tunmatch += 1\n\t\t\t\tif unmatch > 2:\n\t\t\t\t\tmatch = False\n\t\t\t\t\tbreak\n\t\tif match:\n\t\t\toccurence.append(i)\n\treturn occurence",
"def compare(seq1, seq2):\n if seq1 == seq2:\n return 1\n len_diff = len(seq1) / len(seq2)\n if len_diff > 1:\n len_diff = 1 / len_diff\n\n ngrams1 = {tuple(ng) for ng in get_all_ngrams(seq1)}\n ngrams2 = {tuple(ng) for ng in get_all_ngrams(seq2)}\n\n overall = len(ngrams1 & ngrams2) / len(ngrams1 | ngrams2)\n if overall == 1 or overall == 0:\n return overall\n\n try:\n max_match = len(max(ngrams1 & ngrams2, key=len)) / len(seq1)\n except ValueError:\n return 0\n\n return (len_diff + max_match + overall) / 3",
"def test_is_gapped(self):\n assert not self.RNA(\"\").is_gapped()\n assert not self.RNA(\"ACGUCAGUACGUCAGNRCGAUcaguaguacYRNRYRN\").is_gapped()\n assert self.RNA(\"-\").is_gapped()\n assert self.PROT(\"--\").is_gapped()\n assert self.RNA(\"CAGUCGUACGUCAGUACGUacucauacgac-caguACUG\").is_gapped()\n assert self.RNA(\"CA--CGUAUGCA-----g\").is_gapped()\n assert self.RNA(\"CAGU-\").is_gapped()",
"def exactly_n_ver(seq, n):\r\n num_AB = 0\r\n for item in seq:\r\n if np.array_equal(item, Quantifier.AB):\r\n num_AB += 1\r\n return Quantifier.T if num_AB == n else Quantifier.F",
"def remove_duplicates_by_matching():\n # 1) and 2)\n all_blobs = vision.pqr_r\n all_angles = np.zeros(0)\n right = np.transpose(vision.pqr_r)\n left = np.transpose(vision.pqr_l)\n\n if not right.size and not left.size:\n return (0, 0)\n\n if not right.size:\n for l in left:\n angle = np.arctan2(l[1], l[0]) * 180 / pi\n all_angles = np.append(all_angles, angle)\n return (vision.pqr_l, all_angles)\n\n if not left.size:\n for r in right:\n angle = np.arctan2(r[1], r[0]) * 180 / pi\n all_angles = np.append(all_angles, angle)\n return (vision.pqr_r, all_angles)\n\n\n for r in right:\n angle = np.arctan2(r[1], r[0]) * 180 / pi\n all_angles = np.append(all_angles, angle)\n cand_r = np.zeros((3,1))\n if angle < 15:\n cand_r = np.append(cand_r, [[r[0]], [r[1]], [r[2]]], axis=1)\n cand_r = np.delete(cand_r, 0, axis=1)\n cand_r = np.transpose(cand_r)\n\n for l in left:\n angle = np.arctan2(l[1], l[0]) * 180 / pi\n dot = 0\n if angle > -15:\n dl = max(0.001, np.linalg.norm(l))\n for r in cand_r:\n dr = max(0.001, np.linalg.norm(r))\n dot = np.dot(r, l) / (dr * dl)\n print(dot)\n if dot > 0.9:\n continue\n \n if dot <= 0.9:\n all_blobs = np.append(all_blobs, [[l[0]], [l[1]], [l[2]]], axis=1)\n all_angles = np.append(all_angles, angle)\n\n # make even number of blobs if necessary\n #if all_blobs.shape[1] % 2:\n # all_blobs = np.delete(all_blobs, 0, axis=1)\n # all_angles = np.delete(all_angles, 0)\n\n\n\n return (all_blobs, all_angles)",
"def calculateOverlap(mfaPairs, exons1, exons2):\n exonSize = 0\n noMatchToExons = 0\n noMatchToNonExons = 0\n noMatchToGapsInExons = 0\n noMatchToGapsNotInExons = 0\n \n p = -100000\n for i in exons1:\n exonSize += i[1] - i[0]\n for j in xrange(i[0], i[1]):\n k = mfaPairs[j]\n l = k\n if k == GAP:\n l = p \n for m in exons2:\n if(l >= m[0] and l < m[1]):\n if k == GAP:\n noMatchToGapsInExons += 1\n else:\n noMatchToExons += 1\n break\n else:\n if k == GAP:\n noMatchToGapsNotInExons += 1\n else:\n noMatchToNonExons += 1\n if k != GAP:\n p = k\n return (exonSize, noMatchToExons, noMatchToNonExons,\\\n noMatchToGapsInExons, noMatchToGapsNotInExons)",
"def negative_graph_match_test():\n testInputs = [ # noqa: N806\n [\n str(\n \"\"\"@prefix : <http://example.org/ns#> .\n <http://example.org> :rel\n [ :label \"Same\" ].\n \"\"\"\n ),\n str(\n \"\"\"@prefix : <http://example.org/ns#> .\n <http://example.org> :rel\n [ :label \"Same\" ],\n [ :label \"Same\" ].\n \"\"\"\n ),\n False,\n ],\n [\n str(\n \"\"\"@prefix : <http://example.org/ns#> .\n <http://example.org> :rel\n <http://example.org/a>.\n \"\"\"\n ),\n str(\n \"\"\"@prefix : <http://example.org/ns#> .\n <http://example.org> :rel\n <http://example.org/a>,\n <http://example.org/a>.\n \"\"\"\n ),\n True,\n ],\n [\n str(\n \"\"\"@prefix : <http://example.org/ns#> .\n :linear_two_step_symmetry_start :related [ :related [ :related :linear_two_step_symmatry_end]],\n [ :related [ :related :linear_two_step_symmatry_end]].\"\"\"\n ),\n str(\n \"\"\"@prefix : <http://example.org/ns#> .\n :linear_two_step_symmetry_start :related [ :related [ :related :linear_two_step_symmatry_end]],\n [ :related [ :related :linear_two_step_symmatry_end]].\"\"\"\n ),\n True,\n ],\n [\n str(\n \"\"\"@prefix : <http://example.org/ns#> .\n _:a :rel [\n :rel [\n :rel [\n :rel [\n :rel _:a;\n ];\n ];\n ];\n ].\"\"\"\n ),\n str(\n \"\"\"@prefix : <http://example.org/ns#> .\n _:a :rel [\n :rel [\n :rel [\n :rel [\n :rel [\n :rel _:a;\n ];\n ];\n ];\n ];\n ].\"\"\"\n ),\n False,\n ],\n # This test fails because the algorithm purposefully breaks the symmetry of symetric\n [\n str(\n \"\"\"@prefix : <http://example.org/ns#> .\n _:a :rel [\n :rel [\n :rel [\n :rel [\n :rel _:a;\n ];\n ];\n ];\n ].\"\"\"\n ),\n str(\n \"\"\"@prefix : <http://example.org/ns#> .\n _:a :rel [\n :rel [\n :rel [\n :rel [\n :rel _:a;\n ];\n ];\n ];\n ].\"\"\"\n ),\n True,\n ],\n [\n str(\n \"\"\"@prefix : <http://example.org/ns#> .\n _:a :rel [\n :rel [\n :label \"foo\";\n :rel [\n :rel [\n :rel _:a;\n ];\n ];\n ];\n ].\"\"\"\n ),\n str(\n \"\"\"@prefix : <http://example.org/ns#> .\n _:a :rel [\n :rel [\n :rel [\n :rel [\n :rel _:a;\n ];\n ];\n ];\n ].\"\"\"\n ),\n False,\n ],\n [\n str(\n \"\"\"@prefix : <http://example.org/ns#> .\n _:0001 :rel _:0003, _:0004.\n _:0002 :rel _:0005, _:0006.\n _:0003 :rel _:0001, _:0007, _:0010.\n _:0004 :rel _:0001, _:0009, _:0008.\n _:0005 :rel _:0002, _:0007, _:0009.\n _:0006 :rel _:0002, _:0008, _:0010.\n _:0007 :rel _:0003, _:0005, _:0009.\n _:0008 :rel _:0004, _:0006, _:0010.\n _:0009 :rel _:0004, _:0005, _:0007.\n _:0010 :rel _:0003, _:0006, _:0008.\n \"\"\"\n ),\n str(\n \"\"\"@prefix : <http://example.org/ns#> .\n _:0001 :rel _:0003, _:0004.\n _:0002 :rel _:0005, _:0006.\n _:0003 :rel _:0001, _:0007, _:0010.\n _:0008 :rel _:0004, _:0006, _:0010.\n _:0009 :rel _:0004, _:0005, _:0007.\n _:0010 :rel _:0003, _:0006, _:0008.\n _:0004 :rel _:0001, _:0009, _:0008.\n _:0005 :rel _:0002, _:0007, _:0009.\n _:0006 :rel _:0002, _:0008, _:0010.\n _:0007 :rel _:0003, _:0005, _:0009.\n \"\"\"\n ),\n True,\n ],\n ]\n\n def fn(rdf1, rdf2, identical):\n digest1 = get_digest_value(rdf1, \"text/turtle\")\n digest2 = get_digest_value(rdf2, \"text/turtle\")\n print(rdf1)\n print(digest1)\n print(rdf2)\n print(digest2)\n assert (digest1 == digest2) == identical\n\n for inputs in testInputs:\n yield fn, inputs[0], inputs[1], inputs[2]",
"def dynamic(a, b):\n m = len(a)\n n = len(b)\n d = [[0 for i in range(n)] for j in range(m)] # d[i][j] is length of common sequence of consecutive\n prev = [-1 for j in range(n)] # numbers that ends with a[i] == b[j]\n global_max = 0\n global_pos = -1\n for i in range(0, m):\n for j in range(0, n): # iterate through all the elements by dual circle\n if a[i] == b[j]: # if pair is equal then check if there is sequence that ends with a[i]-1\n max_len = 0 # find longest sequence ends with a[i]-1\n max_prev = -1\n for k in range(i+1):\n for l in range(j+1):\n if k == i and l == j:\n continue\n if d[k][l] > max_len and a[k] == b[l] == a[i] - 1:\n max_len = d[k][l]\n max_prev = l\n d[i][j] = max_len + 1\n if d[i][j] > global_max:\n global_max = d[i][j]\n global_pos = j\n prev[j] = max_prev\n\n res = [] # rebuild the answer\n while global_pos != -1:\n res.append(b[global_pos])\n global_pos = prev[global_pos]\n\n return res[::-1]"
] | [
"0.73611933",
"0.6853093",
"0.6707912",
"0.64544857",
"0.61881554",
"0.6067325",
"0.6048163",
"0.59375685",
"0.59336567",
"0.5676094",
"0.55943984",
"0.5567829",
"0.5493283",
"0.5493283",
"0.54437655",
"0.54295635",
"0.54226124",
"0.5417089",
"0.53966296",
"0.53905",
"0.53888845",
"0.5342531",
"0.5332954",
"0.5275982",
"0.52746457",
"0.5266433",
"0.5248604",
"0.5242569",
"0.52416384",
"0.52413344"
] | 0.7459763 | 0 |
Sequence frac_diff_non_gaps should return differences at nongaps | def test_frac_diffNonGaps(self):
s1 = self.RNA("AAAA")
s2 = self.RNA("AGGG")
s3 = self.RNA("GGGG")
s4 = self.RNA("AG--GA-G")
s5 = self.RNA("CU--CU-C")
s6 = self.RNA("AC--GC-G")
s7 = self.RNA("--------")
s8 = self.RNA("AAAA----")
s9 = self.RNA("A-GG-A-C")
e = self.RNA("")
def test(x, y, z):
return self.assertFloatEqual(x.frac_diff_non_gaps(y), z)
test(s1, s2, 0.75)
test(s1, s3, 1)
test(s2, s3, 0.25)
test(s1, s4, 0.5)
test(s4, s5, 1)
test(s4, s6, 0.4)
test(s4, s7, 0)
test(s4, s8, 0.5)
test(s4, s9, 1 / 3.0)
test(e, s4, 0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_frac_diffGaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"GGGG\")\n s3 = self.RNA(\"----\")\n s4 = self.RNA(\"A-A-\")\n s5 = self.RNA(\"-G-G\")\n s6 = self.RNA(\"UU--\")\n s7 = self.RNA(\"-\")\n s8 = self.RNA(\"GGG\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_diff_gaps(s1), 0)\n self.assertEqual(s1.frac_diff_gaps(s2), 0)\n self.assertEqual(s1.frac_diff_gaps(s3), 1)\n self.assertEqual(s1.frac_diff_gaps(s4), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s5), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s6), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s7), 1)\n self.assertEqual(s1.frac_diff_gaps(e), 0)\n self.assertEqual(s3.frac_diff_gaps(s3), 0)\n self.assertEqual(s3.frac_diff_gaps(s4), 0.5)\n self.assertEqual(s3.frac_diff_gaps(s7), 0.0)\n self.assertEqual(e.frac_diff_gaps(e), 0.0)\n self.assertEqual(s4.frac_diff_gaps(s5), 1.0)\n self.assertEqual(s4.frac_diff_gaps(s6), 0.5)\n self.assertFloatEqual(s6.frac_diff_gaps(s8), 1 / 3.0)",
"def test_frac_same_non_gaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"AGGG\")\n s3 = self.RNA(\"GGGG\")\n s4 = self.RNA(\"AG--GA-G\")\n s5 = self.RNA(\"CU--CU-C\")\n s6 = self.RNA(\"AC--GC-G\")\n s7 = self.RNA(\"--------\")\n s8 = self.RNA(\"AAAA----\")\n s9 = self.RNA(\"A-GG-A-C\")\n e = self.RNA(\"\")\n\n def test(x, y, z):\n return self.assertFloatEqual(x.frac_same_non_gaps(y), z)\n\n test(s1, s2, 0.25)\n test(s1, s3, 0)\n test(s2, s3, 0.75)\n test(s1, s4, 0.5)\n test(s4, s5, 0)\n test(s4, s6, 0.6)\n test(s4, s7, 0)\n test(s4, s8, 0.5)\n test(s4, s9, 2 / 3.0)\n test(e, s4, 0)",
"def test_frac_same_gaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"GGGG\")\n s3 = self.RNA(\"----\")\n s4 = self.RNA(\"A-A-\")\n s5 = self.RNA(\"-G-G\")\n s6 = self.RNA(\"UU--\")\n s7 = self.RNA(\"-\")\n s8 = self.RNA(\"GGG\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_same_gaps(s1), 1)\n self.assertEqual(s1.frac_same_gaps(s2), 1)\n self.assertEqual(s1.frac_same_gaps(s3), 0)\n self.assertEqual(s1.frac_same_gaps(s4), 0.5)\n self.assertEqual(s1.frac_same_gaps(s5), 0.5)\n self.assertEqual(s1.frac_same_gaps(s6), 0.5)\n self.assertEqual(s1.frac_same_gaps(s7), 0)\n self.assertEqual(s1.frac_same_gaps(e), 0)\n self.assertEqual(s3.frac_same_gaps(s3), 1)\n self.assertEqual(s3.frac_same_gaps(s4), 0.5)\n self.assertEqual(s3.frac_same_gaps(s7), 1.0)\n self.assertEqual(e.frac_same_gaps(e), 0.0)\n self.assertEqual(s4.frac_same_gaps(s5), 0.0)\n self.assertEqual(s4.frac_same_gaps(s6), 0.5)\n self.assertFloatEqual(s6.frac_same_gaps(s8), 2 / 3.0)",
"def diff_nb(a):\n out = np.empty_like(a, dtype=np.float_)\n out[0, :] = np.nan\n out[1:, :] = a[1:, :] - a[:-1, :]\n return out",
"def test_frac_diff(self):\n s1 = self.RNA(\"ACGU\")\n s2 = self.RNA(\"AACG\")\n s3 = self.RNA(\"GG\")\n s4 = self.RNA(\"A\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_diff(e), 0)\n self.assertEqual(s1.frac_diff(s2), 0.75)\n self.assertEqual(s1.frac_diff(s3), 1)\n self.assertEqual(s1.frac_diff(s4), 0) # note truncation",
"def test_ndiff(self):\n print \"\\n\"\n for d in ndiff(a, b): print d",
"def test_excludeinggaps(self):\n model = substitution_model.TimeReversibleNucleotide(model_gaps=False)\n assert len(model.get_alphabet()) == 4",
"def test_numprops_different_sign(self):\n # Perform diff.\n df = Differ(key=\"name\", deltas={\"energy\": Delta(\"+-\")})\n d = df.diff(*self.engines)\n # Calculate expected results.\n is_different = lambda a, b: a < 0 < b or b < 0 < a\n changed = sum((int(is_different(e[0], e[1])) for e in self.energies))\n # Check results.\n self.assertEqual(len(d[Differ.CHANGED]), changed)",
"def reverse_difference():",
"def test_nongaps(self):\n sc = self.SequenceClass\n self.assertEqual(sc(\"TC\").nongaps(), array([1, 1]))\n self.assertEqual(sc(\"T-\").nongaps(), array([1, 0]))",
"def findGaps(sequence):\n patt = re.compile(r\"[Nn]+\")\n for match in patt.finditer(sequence):\n yield (match.start(), match.end())",
"def diff_1d_nb(a):\n out = np.empty_like(a, dtype=np.float_)\n out[0] = np.nan\n out[1:] = a[1:] - a[:-1]\n return out",
"def all_but_n_ver(seq, n):\r\n num_AnotB = 0\r\n for item in seq:\r\n if np.array_equal(item, Quantifier.AnotB):\r\n num_AnotB += 1\r\n return Quantifier.T if num_AnotB == n else Quantifier.F",
"def diff(self):\n\t\tif len(self.v) < 4:\n\t\t\treturn None\n\t\tif self.poli == None:\n\t\t\tself.generar_polinomio()\n\t\tif x != None:\n\t\t\treturn diff(self.poli)(x)\n\t\treturn diff(self.poli)",
"def calculateIndels(mfaPairs, regions):\n gapLength = 0\n gaps = [0]*1000\n for i in regions:\n for j in xrange(i[0], i[1]):\n k = mfaPairs[j]\n if k == GAP:\n gapLength += 1\n else:\n if gapLength != 0:\n gaps[gapLength] += 1\n gapLength = 0\n return gaps",
"def get_periods(a,t):\n ex = get_extrema(a,t)[1]\n \n l = ipol(ex,0)\n \n diff = np.diff(l)\n \n return diff",
"def test_includinggaps(self):\n model = substitution_model.TimeReversibleNucleotide(model_gaps=True)\n assert len(model.get_alphabet()) == 5",
"def diff_frac(data_1, data_2):\n\n frac_1 = np.sum(data_1) / len(data_1)\n frac_2 = np.sum(data_2) / len(data_2)\n\n return frac_1 - frac_2",
"def array_diff(a, b):",
"def lagcrp(rec, lstlen):\n\n def check_pair(a, b):\n if (a>0 and b>0) and (a!=b):\n return True\n else:\n return False\n\n def compute_actual(rec, lstlen):\n arr=pd.Series(data=np.zeros((lstlen)*2),\n index=list(range(-lstlen,0))+list(range(1,lstlen+1)))\n recalled=[]\n for trial in range(0,len(rec)-1):\n a=rec[trial]\n b=rec[trial+1]\n if check_pair(a, b) and (a not in recalled) and (b not in recalled):\n arr[b-a]+=1\n recalled.append(a)\n return arr\n\n def compute_possible(rec, lstlen):\n arr=pd.Series(data=np.zeros((lstlen)*2),\n index=list(range(-lstlen,0))+list(range(1,lstlen+1)))\n recalled=[]\n for trial in rec:\n if np.isnan(trial):\n pass\n else:\n lbound=int(1-trial)\n ubound=int(lstlen-trial)\n chances=list(range(lbound,0))+list(range(1,ubound+1))\n for each in recalled:\n if each-trial in chances:\n chances.remove(each-trial)\n arr[chances]+=1\n recalled.append(trial)\n return arr\n\n actual = compute_actual(rec, lstlen)\n possible = compute_possible(rec, lstlen)\n crp = [0.0 if j == 0 else i / j for i, j in zip(actual, possible)]\n crp.insert(int(len(crp) / 2), np.nan)\n return crp",
"def ndiffs(x, alpha = 0.05, max_lags = None, max_d = 2):\n x = x[~pd.isnull(x)]\n d = 0\n if alpha < 0.01:\n print(\"Specified alpha value is less than the minimum, setting alpha=0.01\")\n alpha = 0.01\n if alpha > 0.1:\n print(\"Specified alpha value is larger than the maximum, setting alpha=0.1\")\n alpha = 0.1\n if is_constant(x):\n final_d = d\n \n alpha = alpha\n if max_lags == None:\n max_lags = round(3 * np.sqrt(len(x))/13)\n warnings.simplefilter(\"ignore\")\n kpss_stat, p_value, lags, crit = kpss(x = x, lags = max_lags)\n \n while (alpha < kpss_stat) & (d <= max_d):\n d = d + 1\n x = np.diff(x)\n kpss_stat, p_value, lags, crit = kpss(x = x, lags = max_lags)\n if not is_constant(x): \n final_d = d\n \n if final_d > max_d:\n final_d = max_d\n \n return final_d",
"def delta_n(n, zeros):\n #return log(zeros[n]/2.0/pi/e)/2.0/pi*(zeros[n+1]-zeros[n])\n return log(zeros[n]/2.0/pi)/2.0/pi*(zeros[n+1]-zeros[n])",
"def revContFraction(a):\n N = 1\n D = a[len(a)-1]\n\n for i in range(len(a)-2, -1, -1):\n N += D * a[i]\n tmp = N\n N = D\n D = tmp\n return (N,D)",
"def diffsents(sa, sb):\n m = len(sa)\n n = len(sb)\n la = lb = 0\n ra = m - 1\n rb = n - 1\n while la < m and lb < n:\n if sa[la] == sb[lb]:\n la += 1\n lb += 1\n else:\n break\n while ra >= 0 and rb >= 0:\n if sa[ra] == sb[rb]:\n ra -= 1\n rb -= 1\n else:\n break\n while la > ra or lb > rb:\n # la -= 1\n ra += 1\n # lb -= 1\n rb += 1\n if la == ra == m or lb == rb == n:\n la -= 1\n ra -= 1\n lb -= 1\n rb -= 1\n assert 0 <= la <= ra < m, \"{}\\t{}\\t{}\\t{}\\t{}\".format(m, la, ra, sa, sb)\n assert 0 <= lb <= rb < n, \"{}\\t{}\\t{}\\t{}\\t{}\".format(n, lb, rb, sb, sa)\n # sa[la, ra+1], sb[lb, rb+1]\n return la, ra, lb, rb",
"def diffsents(sa, sb):\n m = len(sa)\n n = len(sb)\n la = lb = 0\n ra = m - 1\n rb = n - 1\n while la < m and lb < n:\n if sa[la] == sb[lb]:\n la += 1\n lb += 1\n else:\n break\n while ra >= 0 and rb >= 0:\n if sa[ra] == sb[rb]:\n ra -= 1\n rb -= 1\n else:\n break\n while la > ra or lb > rb:\n # la -= 1\n ra += 1\n # lb -= 1\n rb += 1\n if la == ra == m or lb == rb == n:\n la -= 1\n ra -= 1\n lb -= 1\n rb -= 1\n assert 0 <= la <= ra < m, \"{}\\t{}\\t{}\\t{}\\t{}\".format(m, la, ra, sa, sb)\n assert 0 <= lb <= rb < n, \"{}\\t{}\\t{}\\t{}\\t{}\".format(n, lb, rb, sb, sa)\n # sa[la, ra+1], sb[lb, rb+1]\n return la, ra, lb, rb",
"def test_numprops_different(self):\n # Perform diff.\n delta = 0.5\n df = Differ(key=\"name\", deltas={\"energy\": Delta(\"+-{:f}\".format(delta))})\n d = df.diff(*self.engines)\n # Calculate expected results.\n is_different = lambda a, b: abs(a - b) > delta\n changed = sum((int(is_different(e[0], e[1])) for e in self.energies))\n # Check results.\n self.assertEqual(len(d[Differ.CHANGED]), changed)",
"def _diff(stack_trend, pgp_trend):\n valid_mask = ((stack_trend != stack_nodata) & (pgp_trend != pgp_nodata))\n out_array = numpy.empty_like(stack_trend)\n out_array[:] = -9999\n out_array[valid_mask] = stack_trend[valid_mask] - pgp_trend[valid_mask]\n return out_array",
"def gaps(df):\n return [(round(df[i][\"o\"] - df[i - 1][\"c\"], 2)) for i in range(1, len(df))]",
"def gap_length(L_t=79.6, p=75, fmax=1e12, p1=database['K+'],\r\n p2=database['pi+'], p3=database['p+'], l=2.74,\r\n E=1e6, plot=True, nf=200, delta_p=1.6e-2, n=100,\r\n just_pi=False, set_freq=5.7e9):\r\n gap_length = np.linspace(0, L_t-(2*l), n)[:-1]\r\n min_disp, freq = [], []\r\n for g in gap_length:\r\n if set_freq == None:\r\n f = freq_defl_comp(fmax, p, p1, p2, p3, L_t, l, E, plot=False,\r\n details=False, n=nf, delta_p=delta_p, fmin=0,\r\n just_pi=just_pi)\r\n else:\r\n f = set_freq\r\n freq.append(f)\r\n if just_pi == True:\r\n d2 = ang_deflection(p, f, p1, p2, L_t-g, l, E, delta_p=delta_p)\r\n min_disp.append(abs_deflection(d2, g))\r\n if just_pi == False: \r\n d2 = ang_deflection(p, f, p1, p2, L_t-g, l, E, delta_p=delta_p)\r\n d3 = ang_deflection(p, f, p1, p3, L_t-g, l, E, delta_p=delta_p)\r\n disp_2 = abs_deflection(d2, g)\r\n disp_3 = abs_deflection(d3, g)\r\n min_disp.append(np.min([disp_2, disp_3]))\r\n freq = np.array(freq)\r\n min_disp = np.array(min_disp)\r\n ratio = min_disp/freq\r\n ratio *= np.max(freq)/np.max(ratio)\r\n opt_freq_gap_index = np.argmax(ratio)\r\n opt_freq_gap_disp = [freq[opt_freq_gap_index], gap_length[opt_freq_gap_index], min_disp[opt_freq_gap_index]]\r\n if plot == True: \r\n fig = plt.figure(figsize=[9, 5])\r\n ax1 = fig.add_subplot(1, 1, 1)\r\n line1 = ax1.plot(gap_length, min_disp, 'r', alpha=0.5, label=f'minimum displacement')\r\n ax2 = ax1.twinx()\r\n line2 = ax2.plot(gap_length, freq, 'b', alpha=0.5, label=f'optimum frequency')\r\n line3 = ax2.plot(gap_length, ratio, 'g', alpha=0.5, label=f'ratio')\r\n ax1.set_xlabel('Gap Length / m', fontsize=20)\r\n ax1.set_xlim(0, L_t-(2*l))\r\n text = r'Minimum $D_{'\r\n text += p2.name[:-1] + '/' + p3.name[:-1]\r\n text += '}$ / mm'\r\n ax1.set_ylabel(text, fontsize=20, color='r')\r\n ax1.tick_params(axis='y', labelcolor='r')\r\n ax2.set_ylabel('Frequency / Hz', fontsize=20, color='b', alpha=1)\r\n ax2.tick_params(axis='y', labelcolor='b')\r\n ax1.set_ylim(0)\r\n ax2.set_ylim(0)\r\n leg = line1 + line2 + line3\r\n labs = [l.get_label() for l in leg]\r\n ax1.legend(leg, labs, loc=0, fontsize=12)\r\n fig.tight_layout()\r\n plt.show()\r\n return opt_freq_gap_disp",
"def n_neg(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == -1:\n running_total += 1\n return running_total if running_total > 0 else 1"
] | [
"0.7439105",
"0.6938618",
"0.67893827",
"0.6030237",
"0.603009",
"0.5993497",
"0.57819146",
"0.57747155",
"0.57746786",
"0.567172",
"0.56545997",
"0.5574829",
"0.5558459",
"0.55300987",
"0.5500124",
"0.54575205",
"0.5447627",
"0.5424767",
"0.5401605",
"0.5380952",
"0.5379418",
"0.5347604",
"0.53405774",
"0.53250796",
"0.53250796",
"0.532178",
"0.53101903",
"0.529151",
"0.5278321",
"0.5260829"
] | 0.7880588 | 0 |
Sequence frac_similar should return the fraction similarity | def test_frac_similar(self):
        transitions = dict.fromkeys(
            [
                ("A", "A"),
                ("A", "G"),
                ("G", "A"),
                ("G", "G"),
                ("U", "U"),
                ("U", "C"),
                ("C", "U"),
                ("C", "C"),
            ]
        )

        s1 = self.RNA("UCAGGCAA")
        s2 = self.RNA("CCAAAUGC")
        s3 = self.RNA("GGGGGGGG")
        e = self.RNA("")

        def test(x, y, z):
            return self.assertFloatEqual(x.frac_similar(y, transitions), z)

        test(e, e, 0)
        test(s1, e, 0)
        test(s1, s1, 1)
        test(s1, s2, 7.0 / 8)
        test(s1, s3, 5.0 / 8)
        test(s2, s3, 4.0 / 8) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_frac_same(self):\n s1 = self.RNA(\"ACGU\")\n s2 = self.RNA(\"AACG\")\n s3 = self.RNA(\"GG\")\n s4 = self.RNA(\"A\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_same(e), 0)\n self.assertEqual(s1.frac_same(s2), 0.25)\n self.assertEqual(s1.frac_same(s3), 0)\n self.assertEqual(s1.frac_same(s4), 1.0) # note truncation",
"def test_frac_same_non_gaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"AGGG\")\n s3 = self.RNA(\"GGGG\")\n s4 = self.RNA(\"AG--GA-G\")\n s5 = self.RNA(\"CU--CU-C\")\n s6 = self.RNA(\"AC--GC-G\")\n s7 = self.RNA(\"--------\")\n s8 = self.RNA(\"AAAA----\")\n s9 = self.RNA(\"A-GG-A-C\")\n e = self.RNA(\"\")\n\n def test(x, y, z):\n return self.assertFloatEqual(x.frac_same_non_gaps(y), z)\n\n test(s1, s2, 0.25)\n test(s1, s3, 0)\n test(s2, s3, 0.75)\n test(s1, s4, 0.5)\n test(s4, s5, 0)\n test(s4, s6, 0.6)\n test(s4, s7, 0)\n test(s4, s8, 0.5)\n test(s4, s9, 2 / 3.0)\n test(e, s4, 0)",
"def test_frac_diff(self):\n s1 = self.RNA(\"ACGU\")\n s2 = self.RNA(\"AACG\")\n s3 = self.RNA(\"GG\")\n s4 = self.RNA(\"A\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_diff(e), 0)\n self.assertEqual(s1.frac_diff(s2), 0.75)\n self.assertEqual(s1.frac_diff(s3), 1)\n self.assertEqual(s1.frac_diff(s4), 0) # note truncation",
"def similar(a, b):\n return SequenceMatcher(None, a, b).ratio()",
"def similar(a, b):\n return SequenceMatcher(None, a, b).ratio()",
"def test_frac_same_gaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"GGGG\")\n s3 = self.RNA(\"----\")\n s4 = self.RNA(\"A-A-\")\n s5 = self.RNA(\"-G-G\")\n s6 = self.RNA(\"UU--\")\n s7 = self.RNA(\"-\")\n s8 = self.RNA(\"GGG\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_same_gaps(s1), 1)\n self.assertEqual(s1.frac_same_gaps(s2), 1)\n self.assertEqual(s1.frac_same_gaps(s3), 0)\n self.assertEqual(s1.frac_same_gaps(s4), 0.5)\n self.assertEqual(s1.frac_same_gaps(s5), 0.5)\n self.assertEqual(s1.frac_same_gaps(s6), 0.5)\n self.assertEqual(s1.frac_same_gaps(s7), 0)\n self.assertEqual(s1.frac_same_gaps(e), 0)\n self.assertEqual(s3.frac_same_gaps(s3), 1)\n self.assertEqual(s3.frac_same_gaps(s4), 0.5)\n self.assertEqual(s3.frac_same_gaps(s7), 1.0)\n self.assertEqual(e.frac_same_gaps(e), 0.0)\n self.assertEqual(s4.frac_same_gaps(s5), 0.0)\n self.assertEqual(s4.frac_same_gaps(s6), 0.5)\n self.assertFloatEqual(s6.frac_same_gaps(s8), 2 / 3.0)",
"def wordSimilarityRatio(sent_1,sent_2):",
"def test_frac_diffNonGaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"AGGG\")\n s3 = self.RNA(\"GGGG\")\n s4 = self.RNA(\"AG--GA-G\")\n s5 = self.RNA(\"CU--CU-C\")\n s6 = self.RNA(\"AC--GC-G\")\n s7 = self.RNA(\"--------\")\n s8 = self.RNA(\"AAAA----\")\n s9 = self.RNA(\"A-GG-A-C\")\n e = self.RNA(\"\")\n\n def test(x, y, z):\n return self.assertFloatEqual(x.frac_diff_non_gaps(y), z)\n\n test(s1, s2, 0.75)\n test(s1, s3, 1)\n test(s2, s3, 0.25)\n test(s1, s4, 0.5)\n test(s4, s5, 1)\n test(s4, s6, 0.4)\n test(s4, s7, 0)\n test(s4, s8, 0.5)\n test(s4, s9, 1 / 3.0)\n test(e, s4, 0)",
"def cont_frac_rat( frac ):\n\n num = frac.numerator\n den = frac.denominator\n\n answer = []\n r1 = num\n r2 = den\n r3 = r1 % r2\n q = r1 / r2\n\n answer.append(q)\n\n while r3 != 0: # euclidean algorithm\n r1 = r2\n r2 = r3\n r3 = r1 % r2\n q = r1/r2\n answer.append(q)\n\n return answer",
"def test_frac_diffGaps(self):\n s1 = self.RNA(\"AAAA\")\n s2 = self.RNA(\"GGGG\")\n s3 = self.RNA(\"----\")\n s4 = self.RNA(\"A-A-\")\n s5 = self.RNA(\"-G-G\")\n s6 = self.RNA(\"UU--\")\n s7 = self.RNA(\"-\")\n s8 = self.RNA(\"GGG\")\n e = self.RNA(\"\")\n self.assertEqual(s1.frac_diff_gaps(s1), 0)\n self.assertEqual(s1.frac_diff_gaps(s2), 0)\n self.assertEqual(s1.frac_diff_gaps(s3), 1)\n self.assertEqual(s1.frac_diff_gaps(s4), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s5), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s6), 0.5)\n self.assertEqual(s1.frac_diff_gaps(s7), 1)\n self.assertEqual(s1.frac_diff_gaps(e), 0)\n self.assertEqual(s3.frac_diff_gaps(s3), 0)\n self.assertEqual(s3.frac_diff_gaps(s4), 0.5)\n self.assertEqual(s3.frac_diff_gaps(s7), 0.0)\n self.assertEqual(e.frac_diff_gaps(e), 0.0)\n self.assertEqual(s4.frac_diff_gaps(s5), 1.0)\n self.assertEqual(s4.frac_diff_gaps(s6), 0.5)\n self.assertFloatEqual(s6.frac_diff_gaps(s8), 1 / 3.0)",
"def similarL(a, b, ratio):\n for x in b:\n if SequenceMatcher(None, a, x).ratio() > ratio:\n return x\n return False",
"def get_similarities(tags):\n return [(a, b)\n for (a, b) in itertools.permutations(tags, 2)\n if difflib.SequenceMatcher(a=a.lower(), b=b.lower()).ratio() > SIMILAR\n and a != b\n and b.endswith('s')]",
"def test_similarity_numeric():\n similarity = pm.compute_similarity_for_numeric(900, 800)\n nose.tools.ok_(abs(similarity - 8/9) < tests.FLOAT_DELTA, \"Wrong numeric similarity\")",
"def __getSimilarityScore(expected, actual):\n return SequenceMatcher(None, expected, actual).ratio()",
"def calc_similarity(lhs, rhs):\n lhs_decomp = decompose(lhs)\n rhs_decomp = decompose(rhs)\n dist = editdistance.eval(lhs_decomp, rhs_decomp)\n max_len = max(len(lhs_decomp), len(rhs_decomp))\n sim = float(max_len - dist) / float(max_len)\n logging.debug('SIM: [%s] vs [%s] ==> %d / %d = %f', lhs.encode('UTF-8'), rhs.encode('UTF-8'),\n max_len - dist, max_len, sim)\n return sim",
"def test_sad_similarity_measure_values():\n \n patch1 = torch.tensor([1.3, 4.5, 7.2, 0.2, -0.6])\n patch2 = torch.tensor([0.2, 4.4, 7.6, 0.1, 1.3])\n\n sad = sad_similarity_measure(patch1, patch2)\n\n assert np.isclose(sad, 3.6, atol=1e-2)",
"def test_mixing_ratio():\n p = 998. * units.mbar\n e = 73.75 * units.mbar\n assert_almost_equal(mixing_ratio(e, p), 0.04963, 2)",
"def similarparts(imagparts):\n dupl = []\n global opt\n l = len(imagparts[0])-1\n \n for i in range(len(imagparts)-1): \n difs = sum(abs(x-y) for x,y in zip(imagparts[i][:l],imagparts[i+1][:l]))\n mean = float(sum(imagparts[i][:l])) / l\n dev = float(sum(abs(mean-val) for val in imagparts[i][:l])) / l\n if dev/mean >= float(opt.blcoldev):\n if difs <= int(opt.blsim):\n if imagparts[i] not in dupl:\n dupl.append(imagparts[i])\n if imagparts[i+1] not in dupl:\n dupl.append(imagparts[i+1])\n\n return dupl",
"def correct_fraction():\n with open(os.path.join(ocr.settings.BASE_DIR,\n 'training_set.json')) as file:\n training_set = json.load(file)\n correct = 0\n for letter in training_set['list']:\n print(letter['letter'])\n for _ in range(REPETITIONS):\n if ocr.basic_nn.tools.recognize_symbol(letter['inputs']) \\\n == letter['letter']:\n correct += 1 / REPETITIONS\n fraction = correct / len(training_set['list'])\n print(fraction)\n return fraction",
"def get_similarities(tags=None):\n tags = tags or _get_tags()\n # do your thing ...\n similar_tags = list()\n for item in itertools.permutations(tags, 2):\n diffchecker = SequenceMatcher(isjunk=None, a=item[0], b=item[1], autojunk=True)\n similarity = diffchecker.quick_ratio()\n if similarity >= SIMILAR:\n similar_tags.append(item)\n return similar_tags",
"def equivalent (self, factor):\n if (self.debug): print(f'enter fraction.equivalent {factor}')\n return (self.value[0] * factor, self.value[1]* factor)",
"def similarity(pair: Tuple[Text, Text]) -> float:\n (a, b) = pair\n missing = (\n True\n if any(symbol not in Metrics.realine.feature_matrix for symbol in pair)\n else False\n )\n return 0.0 if missing else 1 - Metrics.realine.delta(a, b)",
"def string_similarity(a, b):\n return SequenceMatcher(a=a, b=b).ratio()",
"def _favg(sequence):\n return math.fsum(sequence) / len(sequence)",
"def _match(a, b):\n return SequenceMatcher(None, a, b).ratio()",
"def ratio(n1,n2, explain=0, optimize=False):\n weight_normal_form = 5.0 #distance between soundexes of normal form\n weight_normal_form_soundex = 8.0 #average distance between soundexes of normal form\n weight_geslachtsnaam1 = 10.0 #distance between soundexes of geslachtsnamen\n weight_geslachtsnaam2 = 10.0 #distance between geslachtsnaam\n weight_initials = 2 #distance between initials\n\n nf1 = n1.guess_normal_form()\n nf2 = n2.guess_normal_form()\n\n if not nf1 or not nf2:\n return 0.0\n elif nf1 == nf2:\n return 1.0\n ratio_normal_form = Similarity.average_distance(split(nf1), split(nf2))\n \n #create a simkplified soundex set for this name\n #remove stopwords\n# nf1 = remove_stopwords( nf1)\n# nf2 = remove_stopwords( nf2)\n \n se1 = n1.get_normal_form_soundex()\n se2 = n2.get_normal_form_soundex()\n ratio_normal_form_soundex = Similarity.average_distance( se1, se2)\n \n #gelachtsnaam wordt op twee manieren met elkaar vergeleken\n g1 = n1.geslachtsnaam() #or n1.get_volledige_naam()\n g2 = n2.geslachtsnaam() #or n2.get_volledige_naam()\n g1 = to_ascii(g1)\n g2 = to_ascii(g2)\n if not optimize:\n #de soundexes van de achternaam worden meegewoen\n #g1_soundex = n1.soundex_nl(g1, group=2, length=-1)\n g1_soundex = n1.geslachtsnaam_soundex()\n #g2_soundex = n2.soundex_nl(g2, group=2, length=-1)\n g2_soundex = n2.geslachtsnaam_soundex()\n ratio_geslachtsnaam1 = Similarity.average_distance(g1_soundex, g2_soundex)\n else:\n ratio_geslachtsnaam1 = 1 \n weight_geslachtsnaam1 = 0\n \n #n de afstand van de woorden in de achtenraam zelf\n ratio_geslachtsnaam2 = Similarity.average_distance(\n re.split('[ \\.\\,\\-]', g1.lower()),\n re.split('[ \\.\\,\\-]', g2.lower()),\n levenshtein_ratio)\n n1_initials = n1.initials()\n n1_initials_lower = n1_initials.lower()\n n2_initials = n2.initials()\n n2_initials_lower = n2_initials.lower()\n n1_contains_initials = n1.contains_initials()\n n2_contains_initials = n2.contains_initials()\n #count initials only if we have more than one\n #(or perhaps make this: if we know the first name)\n if len(n1_initials) == 1 or len(n2_initials) == 1:\n #initials count much less if there is only one\n weight_initials = weight_initials_if_one_name_consists_of_one_word_only\n# ratio_initials = .5\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n elif n1_contains_initials or n2_contains_initials:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n weight_initials = weight_initials_if_one_name_is_in_initials\n elif len(n1_initials) > 1 and len(n2_initials) > 1:\n ratio_initials = levenshtein_ratio(n1_initials_lower, n2_initials_lower)\n else:\n ratio_initials = 0.7\n \n if n1_contains_initials or n2_contains_initials:\n weight_normal_form = weight_normal_form_if_one_name_is_in_initials \n weight_normal_form_soundex = weight_normal_form_soundex_if_one_name_is_in_initials\n\n counter = (ratio_normal_form * weight_normal_form +\n ratio_normal_form_soundex * weight_normal_form_soundex +\n ratio_geslachtsnaam1 * weight_geslachtsnaam1 +\n ratio_geslachtsnaam2 * weight_geslachtsnaam2 +\n ratio_initials * weight_initials)\n numerator = (weight_normal_form + weight_normal_form_soundex +\n weight_initials + weight_geslachtsnaam1 + weight_geslachtsnaam2)\n if numerator == 0:\n return 0.0\n final_ratio = counter/numerator\n\n if explain:\n s = '-' * 100 + '\\n'\n s += 'Naam1: %s [%s] [%s] %s\\n' % (n1, n1_initials, n1.guess_normal_form(), se1)\n s += 'Naam2: %s [%s] [%s] %s\\n' % (n2, n2_initials, n2.guess_normal_form(), se2)\n s += 
'Similarity ratio: %s\\n' % final_ratio\n s += '--- REASONS' + '-' * 30 + '\\n'\n format_s = '%-30s | %-10s | %-10s | %-10s | %-10s | %s-10s\\n'\n s += format_s % ('\\t property', ' ratio', ' weight','relative_weight', ' r*w', 'r * relative_w')\n s += '\\t' + '-' * 100 + '\\n'\n format_s = '\\t%-30s | %-10f | %-10f | %-10f | %-10f | %-10f\\n'\n s += format_s % (' normal_form', ratio_normal_form, weight_normal_form,weight_normal_form/counter, ratio_normal_form * weight_normal_form, ratio_normal_form * weight_normal_form/counter)\n s += format_s % ('soundex van normal_form', ratio_normal_form_soundex, weight_normal_form_soundex,weight_normal_form_soundex/counter, ratio_normal_form_soundex* weight_normal_form_soundex, ratio_normal_form_soundex * weight_normal_form_soundex/counter)\n s += format_s % ('soundex van geslachtsnaam1', ratio_geslachtsnaam1, weight_geslachtsnaam1,weight_geslachtsnaam1/counter, ratio_geslachtsnaam1 * weight_geslachtsnaam1, ratio_geslachtsnaam1 * weight_geslachtsnaam1/counter)\n s += format_s % ('geslachtsnaam', ratio_geslachtsnaam2, weight_geslachtsnaam2,weight_geslachtsnaam2/counter, ratio_geslachtsnaam2 *weight_geslachtsnaam2 , ratio_geslachtsnaam2 * weight_geslachtsnaam2/counter)\n s += format_s % ('initials', ratio_initials, weight_initials, weight_initials/counter, ratio_initials *weight_initials, ratio_initials * weight_initials/counter)\n s += '\\tTOTAL (numerator) | %s (counter = %s)\\n' % (counter, numerator)\n \n return s\n return final_ratio",
"def test_similarity(self):\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'dog.n.01'), 1))\n self.assertTrue(np.allclose(self.vectors.similarity('dog.n.01', 'mammal.n.01'), 0.180901358))",
"def reciprocity_ratio(gd):\r\n reciprocal = 0.0\r\n edge_list = gd.get_edgelist()\r\n for i in it.permutations(range(gd.vcount()),2):\r\n if i in edge_list and i[::-1] in edge_list:\r\n reciprocal += 1.0\r\n return reciprocal/gd.ecount()",
"def frac_reverts(self):\n\n text = self.text()\n num_reverts = len(re.findall(\"Reverted\",text))\n return num_reverts/500",
"def get_equal_rate(str1, str2):\r\n\treturn difflib.SequenceMatcher(None, str1, str2).quick_ratio()"
] | [
"0.7017269",
"0.6662179",
"0.6660259",
"0.6638686",
"0.6638686",
"0.6472042",
"0.60609144",
"0.6037269",
"0.60179865",
"0.6010803",
"0.6006697",
"0.58934134",
"0.58387834",
"0.58198506",
"0.5782344",
"0.5776492",
"0.57633436",
"0.57590544",
"0.5711294",
"0.5676241",
"0.56577355",
"0.5623454",
"0.56057197",
"0.5589235",
"0.55883414",
"0.55853045",
"0.55687463",
"0.55492854",
"0.5495461",
"0.54749304"
] | 0.79655254 | 0 |
with_termini_unknown should reset termini to unknown char | def test_with_termini_unknown(self):
s1 = self.RNA("-?--AC--?-")
s2 = self.RNA("AC")
self.assertEqual(s1.with_termini_unknown(), "????AC????")
self.assertEqual(s2.with_termini_unknown(), "AC") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_term_chars_default(self, instrument):\n assert instrument.term_chars == b'\\r'",
"def strip_other_charcter():\n pass",
"def test_term_chars_default(self, instrument):\n assert instrument.term_chars is None",
"def set_terminator (self, term):\r\n self.terminator = term",
"def quitar_tilde_y_may(caracter):\n caracter = caracter.lower()\n if caracter == \"á\": caracter = \"a\"\n if caracter == \"é\": caracter = \"e\"\n if caracter == \"í\": caracter = \"i\"\n if caracter == \"ó\": caracter = \"o\"\n if caracter == \"ú\": caracter = \"u\"\n return caracter",
"def process_default(self, character):\n pass",
"def FSMLetterSymbol(letter):\n return FSMEmptyWordSymbol if letter is None else repr(letter)",
"def beginning_checker(self, translit):\n tr_new = re.sub(r'(\\A|·)夫', r'\\1弗', translit)\n tr_new = re.sub(r'(\\A|·)耶', r'\\1叶', tr_new)\n return tr_new",
"def get_word_char_exceptions(self): # real signature unknown; restored from __doc__\n return \"\"",
"def non_secret_char(c):\n return c",
"def test_unknown(self):\n result = self.flag.parseString('U')\n self.assertEqual('U', result[0])",
"def set_empty_character(self, char):\n self.empty_char = char",
"def test_missing_delim(self):",
"def _clean_magic(self, magic):\n if magic.lower() == 'o':\n return ''\n elif magic[:2].lower() == 'o:':\n return magic[2:]\n return magic",
"def _strip_nul(text):\n return text.replace('\\x00', '<NUL>')",
"def __init__(self, sep_text: Optional[str] = None):\n super().__init__()\n self.special_end_text = sep_text",
"def __init__(self, sep_text: Optional[str] = None):\n super().__init__()\n self.special_end_text = sep_text",
"def isSpecial(ansiCode,string):\n if IS_TERMINAL and not IS_WIN32: return ansiCode+string+ANSI_END\n else: return string",
"def clear_line(string):\n for character in string:\n #backtrack-whitespace-backtrack\n sys.stdout.write(\"\\b \\b\")",
"def __stripEol(self, txt):\n return txt.replace(\"\\r\", \"\").replace(\"\\n\", \"\")",
"def cleaning(string, EOS=False):\n\n # before cleaning up, first identify end of the sentences (EOS)\n if EOS:\n pLu = '[{}]'.format(\"\".join([chr(i) for i in range(sys.maxunicode) if chr(i).isupper()]))\n EOS = re.compile(r'([a-z]+|[ş|ı])(\\. )((' + pLu + '[a-z]?)|([0-9]+))')\n string = EOS.sub(r'\\1#\\3', string)\n\n # period at the end of the sentences are being replaced with hastag (#)\n string = string.lower()\n mapping = {}\n mapping['99_807'] = 231\n mapping['105_770'] = 105\n mapping['117_770'] = 117\n mapping['105_775'] = 105\n mapping['117_776'] = 252\n mapping['115_807'] = 351\n mapping['103_774'] = 287\n mapping['97_770'] = 97\n mapping['111_776'] = 246\n mapping['97_785'] = 97\n Alist = {97, 99, 103, 105, 111, 115, 117}\n solv_prob = []\n flag = False\n for i, c in enumerate(string):\n if flag:\n flag = False\n continue # pass this character\n if not ord(c) in Alist:\n solv_prob.append(c) # no need to check this character\n else:\n if i == len(string) - 1:\n continue\n cn = string[i + 1] # next character\n key = '{}_{}'.format(ord(c), ord(cn)) # creating string with their ordinal\n if key in mapping.keys(): # cheking if this is to be mapped\n solv_prob.append(chr(mapping[key])) # append the mapped character to the list\n flag = True # raising flag to pass next character\n continue\n else:\n solv_prob.append(c)\n\n data = ''.join(solv_prob)\n data = data.replace('iğdır', 'ığdır')\n data = data.replace('irak', 'ırak')\n # Data= [d if len(d) > 0 else '#' for d in data.splitlines()] # removing empty lines\n return data",
"def fix_missing_period(line):\n if \"@highlight\" in line: return line\n if line==\"\": return line\n if line[-1] in END_TOKENS: return line\n # print line[-1]\n return line + \" .\"",
"def missing_char(str, n):\r\n if n<=len(str):\r\n str = str.replace(str[n], \"\")\r\n return str",
"def clean_hanging_newline(t):\n if t and t[-1] == \"\\n\":\n return t[:-1]\n return t",
"def remove_bad_characters(self):\n\n self.categorie_name = self.categorie_name.replace(\"\\n\", \"\")",
"def _unknownRange(self):\n return ''",
"def test_letter_delimiter(self):\n self.non_default_delimiter_template('a')",
"def test_endswith_special_character(self):\n for c in b\"\\0\", b\"\\n\", b\"\\r\", b\" \":\n\n value = b\"value\" + c\n result = attributeAsLDIF(b\"key\", value)\n self.assertEqual(result, b\"key:: %s\\n\" % encode(value))",
"def fix_missing_period(line):\n if \"@highlight\" in line:\n return line\n if line == \"\":\n return line\n if line[-1] in END_TOKENS:\n return line\n return line + \" .\"",
"def _cleanup_string(self, bytes):\n try:\n b = bytes.index(b'\\x00')\n except ValueError:\n return bytes.decode('latin-1').strip()\n else:\n return bytes[:b].decode('latin-1').strip()"
] | [
"0.63693273",
"0.63148445",
"0.5928961",
"0.57174045",
"0.55202454",
"0.5488006",
"0.54239345",
"0.53940064",
"0.53883356",
"0.53773767",
"0.5307043",
"0.52910495",
"0.52059764",
"0.5167487",
"0.51656246",
"0.51492214",
"0.51492214",
"0.51466554",
"0.5135072",
"0.5101027",
"0.50992787",
"0.50976163",
"0.5062082",
"0.50604177",
"0.5045607",
"0.50258243",
"0.501868",
"0.49960807",
"0.498805",
"0.49699613"
] | 0.7095066 | 0 |
gap degen character should be treated consistently | def test_consistent_gap_degen_handling(self):
        # the degen character '?' can be a gap, so when we strip either gaps or
        # degen characters it should be gone too
        raw_seq = "---??-??TC-GGCG-GCA-G-GC-?-C-TAN-GCGC-CCTC-AGGA?-???-??--"
        raw_ungapped = re.sub("[-?]", "", raw_seq)
        raw_no_ambigs = re.sub("[N?]+", "", raw_seq)
        dna = self.DNA(raw_seq)
        self.assertEqual(dna.degap(), raw_ungapped)
        self.assertEqual(dna.strip_degenerate(), raw_no_ambigs)
        self.assertEqual(dna.strip_bad_and_gaps(), raw_ungapped) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_is_gap(self):\n r = self.RNA(\"ACGUCAGUACGUCAGNRCGAUcaguaguacYRNRYRN\")\n for char in \"qwertyuiopasdfghjklzxcvbnmQWERTYUIOASDFGHJKLZXCVBNM\":\n assert not r.is_gap(char)\n assert r.is_gap(\"-\")\n # only works on a single literal that's a gap, not on a sequence.\n # possibly, this behavior should change?\n assert not r.is_gap(\"---\")\n # check behaviour on self\n assert not self.RNA(\"CGAUACGUACGACU\").is_gap()\n assert not self.RNA(\"---CGAUA----CGUACG---ACU---\").is_gap()\n assert self.RNA(\"\").is_gap()\n assert self.RNA(\"----------\").is_gap()",
"def _substitute_opening_gap_char(seq):\n newseq=list(seq)\n iterator=rex.finditer(seq)\n for match in iterator:\n try:\n newseq[match.span()[1]-1]=\"|\"\n except:\n continue\n return \"\".join(newseq)",
"def ungapped(self):\n s = self.sequence\n for sGapChar in GAP_CHARACTERS:\n s = s.replace(sGapChar, '')\n return s",
"def test_is_gapped(self):\n assert not self.RNA(\"\").is_gapped()\n assert not self.RNA(\"ACGUCAGUACGUCAGNRCGAUcaguaguacYRNRYRN\").is_gapped()\n assert self.RNA(\"-\").is_gapped()\n assert self.PROT(\"--\").is_gapped()\n assert self.RNA(\"CAGUCGUACGUCAGUACGUacucauacgac-caguACUG\").is_gapped()\n assert self.RNA(\"CA--CGUAUGCA-----g\").is_gapped()\n assert self.RNA(\"CAGU-\").is_gapped()",
"def checkForNOrGap(character):\n if character == \"-\" or character == \"N\":\n return False\n else:\n return True",
"def checkForNOrGap(character):\n if character == \"-\" or character == \"N\":\n return False\n else:\n return True",
"def test_degap(self):\n # doesn't preserve case\n self.assertEqual(self.RNA(\"\").degap(), \"\")\n self.assertEqual(\n self.RNA(\"GUCAGUCgcaugcnvuncdks\").degap(), \"GUCAGUCGCAUGCNVUNCDKS\"\n )\n self.assertEqual(self.RNA(\"----------------\").degap(), \"\")\n self.assertEqual(self.RNA(\"gcuauacg-\").degap(), \"GCUAUACG\")\n self.assertEqual(self.RNA(\"-CUAGUCA\").degap(), \"CUAGUCA\")\n self.assertEqual(self.RNA(\"---a---c---u----g---\").degap(), \"ACUG\")\n self.assertEqual(self.RNA(\"?a-\").degap(), \"A\")",
"def gk_g_checker(self, seq):\n seq = re.sub(r'гк', r'хк', seq)\n return seq",
"def strip_other_charcter():\n pass",
"def test_first_gap(self):\n self.assertEqual(self.RNA(\"\").first_gap(), None)\n self.assertEqual(self.RNA(\"a\").first_gap(), None)\n self.assertEqual(self.RNA(\"uhacucHuhacUUhacan\").first_gap(), None)\n self.assertEqual(self.RNA(\"-abc\").first_gap(), 0)\n self.assertEqual(self.RNA(\"b-ac\").first_gap(), 1)\n self.assertEqual(self.RNA(\"abcd-\").first_gap(), 4)",
"def test_strip_bad_and_gaps(self):\n # have to turn off check to get bad data in; no longer preserves case\n r = self.RNA(\"ACG--GRN?\")\n self.assertEqual(r.strip_bad_and_gaps(), \"ACGGRN\")\n r._data[0] = 99\n self.assertEqual(r.strip_bad_and_gaps(), \"CGGRN\")",
"def non_secret_char(c):\n return c",
"def test_letter_delimiter(self):\n self.non_default_delimiter_template('a')",
"def _encode_gap(self):\n\t\tgap_length = self.config.get('repeat_gap',\n\t\t self.config.get('gap',\n\t\t 0))\n\t\treturn self._encode_bit('0', gap_length)",
"def prepseq(self, seq):\n\n wtf = re.sub(r'\\*$', '', seq)\n return wtf",
"def horizontal_char(self):\n ...",
"def beginning_checker(self, translit):\n tr_new = re.sub(r'(\\A|·)夫', r'\\1弗', translit)\n tr_new = re.sub(r'(\\A|·)耶', r'\\1叶', tr_new)\n return tr_new",
"def test_strip_bad_and_gaps(self):\n # have to turn off check to get bad data in; no longer preserves case\n self.assertEqual(\n self.RNA(\"UxxCAGwsnyrHBNz#!D-D\", check=False).strip_bad_and_gaps(),\n \"UCAGWSNYRHBNDD\",\n )\n self.assertEqual(\n self.RNA(\"@#^*($@!#&()!@QZX\", check=False).strip_bad_and_gaps(), \"\"\n )\n self.assertEqual(\n self.RNA(\"aaa ggg ---!ccc\", check=False).strip_bad_and_gaps(), \"AAAGGGCCC\"\n )",
"def character(self) -> str:\r\n return self.char if self.was_guessed else '_'",
"def keep_chr(char):\n return (unicodedata.category(char).startswith('P') and\n (char != \"#\" and char != \"@\" and char != \"&\"))",
"def guess_seq(seq):\n dna = \"ACTG-N\"\n \n chars = util.unique(seq.upper())\n \n for char in chars:\n if char not in dna:\n return \"pep\"\n return \"dna\"",
"def quitar_tilde_y_may(caracter):\n caracter = caracter.lower()\n if caracter == \"á\": caracter = \"a\"\n if caracter == \"é\": caracter = \"e\"\n if caracter == \"í\": caracter = \"i\"\n if caracter == \"ó\": caracter = \"o\"\n if caracter == \"ú\": caracter = \"u\"\n return caracter",
"def dummy_junction14():\n return \"junction:chr1:176-324:+\"",
"def test_not_gap(self):\n m, seq = DNA.make_seq(\"ACGGT--A\").parse_out_gaps()\n self.assertTrue(not_gap(m[0]))\n self.assertFalse(not_gap(m[5]))",
"def dummy_junction13():\n return 'junction:chr1:176-299:+'",
"def get_space_bytes(self, bytes_):\n if self.state == 'S':\n # in space eating mode\n # control space needed?\n if bytes_.startswith(u' '):\n # replace by control space\n return u'\\\\ ', bytes_[1:]\n else:\n # insert space (it is eaten, but needed for separation)\n return u' ', bytes_\n else:\n return u'', bytes_",
"def remove_end_spaces(conversion):\n ending = False\n while ending == False:\n if conversion[-1] == ' ':\n del conversion[-1]\n if conversion[-1] != ' ':\n ending = True",
"def filter_jchars(c):\r\n if is_asian(c):\r\n return ' '\r\n return c",
"def test_gap_vector(self):\n\n def g(x):\n return self.RNA(x).gap_vector()\n\n self.assertEqual(g(\"\"), [])\n self.assertEqual(g(\"ACUGUCAGUACGHCSDKCCUCCDNCNS\"), [False] * 27)\n self.assertEqual(\n g(\"GUACGUAACAKADC-SDAHADSAK\"),\n list(map(bool, list(map(int, \"000000000000001000000000\")))),\n )\n self.assertEqual(g(\"-DSHSUHDSS\"), list(map(bool, list(map(int, \"1000000000\")))))\n self.assertEqual(\n g(\"UACHASCAGDS-\"), list(map(bool, list(map(int, \"000000000001\"))))\n )\n self.assertEqual(\n g(\"---CGAUgCAU---ACGHc---ACGUCAGU--?\"),\n list(map(bool, list(map(int, \"111000000001110000011100000000111\")))),\n )",
"def get_word_char_exceptions(self): # real signature unknown; restored from __doc__\n return \"\""
] | [
"0.69210255",
"0.64609724",
"0.63262624",
"0.63087875",
"0.6294152",
"0.6294152",
"0.6140576",
"0.6091902",
"0.60851234",
"0.5905722",
"0.5901116",
"0.5892332",
"0.5745669",
"0.5726817",
"0.5707481",
"0.5671575",
"0.5660184",
"0.565967",
"0.56095153",
"0.5588575",
"0.5571344",
"0.5546762",
"0.55393183",
"0.5532575",
"0.55154866",
"0.55060107",
"0.5505029",
"0.5489977",
"0.5479038",
"0.5439916"
] | 0.7278505 | 0 |
DnaSequence should behave as expected | def test_DnaSequence(self):
x = DnaSequence("tcag")
# note: no longer preserves case
self.assertEqual(x, "TCAG")
x = DnaSequence("aaa") + DnaSequence("ccc")
# note: doesn't preserve case
self.assertEqual(x, "AAACCC")
assert x.moltype is DNA
self.assertRaises(AlphabetError, x.__add__, "z")
self.assertEqual(DnaSequence("TTTAc").rc(), "GTAAA") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_ModelDnaCodonSequence(self):\n d = ArrayDnaCodonSequence(\"UUUCGU\")\n self.assertEqual(str(d), \"TTTCGT\")\n self.assertEqual(d._data, array([0, 28]))\n self.assertEqual(str(d.to_rna()), \"UUUCGU\")\n self.assertEqual(str(d.to_dna()), \"TTTCGT\")",
"def translate_DNA(dnaseq):\n\n gen = aa_generator_DNA(dnaseq)\n seq = ''\n aa = next(gen, None)\n while aa:\n seq += aa\n aa = next(gen, None)\n return seq",
"def test_to_fasta(self):\n even = \"TCAGAT\"\n odd = even + \"AAA\"\n even_dna = self.SequenceClass(even, name=\"even\")\n odd_dna = self.SequenceClass(odd, name=\"odd\")\n self.assertEqual(even_dna.to_fasta(), \">even\\nTCAGAT\\n\")\n # set line wrap to small number so we can test that it works\n self.assertEqual(even_dna.to_fasta(block_size=2), \">even\\nTC\\nAG\\nAT\\n\")\n self.assertEqual(odd_dna.to_fasta(block_size=2), \">odd\\nTC\\nAG\\nAT\\nAA\\nA\\n\")\n # check that changing the linewrap again works\n self.assertEqual(even_dna.to_fasta(block_size=4), \">even\\nTCAG\\nAT\\n\")",
"def test_to_fasta(self):\n even = \"TCAGAT\"\n odd = even + \"AAA\"\n even_dna = self.SEQ(even, name=\"even\")\n odd_dna = self.SEQ(odd, name=\"odd\")\n self.assertEqual(even_dna.to_fasta(), \">even\\nTCAGAT\\n\")\n # set line wrap to small number so we can test that it works\n self.assertEqual(even_dna.to_fasta(block_size=2), \">even\\nTC\\nAG\\nAT\\n\")\n self.assertEqual(odd_dna.to_fasta(block_size=2), \">odd\\nTC\\nAG\\nAT\\nAA\\nA\\n\")\n # check that changing the linewrap again works\n self.assertEqual(even_dna.to_fasta(block_size=4), \">even\\nTCAG\\nAT\\n\")",
"def RNAorDNA ( seq ) :\n\tif dna_regex . search ( seq ):\n\t\treturn RNA ( seq )\n\n\tif rna_regex . search ( seq ):\n\t\treturn DNA ( seq )",
"def aa_generator_DNA(dnaseq):\n return (translate_DNA_codon(dnaseq[n:n+3])\n for n in range(0, len(dnaseq), 3))",
"def dna(self):\n return self.seq.replace('U', 'T').replace('u', 't')",
"def initialize_dna(self):\n return np.random.rand(1, self.n_genes) * 2 - 1",
"def test_dna_existent_sequence(self):\n pairs = [\n self._create_codon_pair(),\n self._create_codon_pair(),\n self._create_codon_pair(),\n self._create_codon_pair(),\n self._create_codon_pair(),\n self._create_codon_pair(),\n ]\n\n dna = DNA()\n for codon_pair in pairs:\n dna.append(codon_pair)\n\n for index, code_pair in enumerate(dna):\n self.assertEqual(code_pair.sequence, pairs[index].sequence)",
"def test_to_dna(self):\n r = self.RNA(\"TCA\")\n self.assertEqual(str(r), \"UCA\")\n self.assertEqual(str(r.to_dna()), \"TCA\")",
"def test_correct_sequence_list(self):\n pairs = [\n self._create_codon_pair(),\n self._create_codon_pair(),\n self._create_codon_pair(),\n self._create_codon_pair(),\n self._create_codon_pair(),\n self._create_codon_pair(),\n ]\n\n sequences = [p.sequence for p in pairs]\n\n dna = DNA()\n [dna.append(p) for p in pairs]\n\n # Sequence must match\n self.assertEqual(dna.to_sequence_list(), sequences)",
"def dna_number(bp_seq):\r\n # Hint: use dna_digit\r\n\r\n # YOUR CODE HERE\r",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.inframe_cds_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq, in_frame=False), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def __init__(self):\n self.dna = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq, in_frame=False), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def test_ModelRnaCodonSequence(self):\n r = ArrayRnaCodonSequence(\"UUUCGU\")\n self.assertEqual(str(r), \"UUUCGU\")\n self.assertEqual(r._data, array([0, 28]))\n self.assertEqual(str(r.to_rna()), \"UUUCGU\")\n self.assertEqual(str(r.to_dna()), \"TTTCGT\")",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def __init__(self, dna):\n self.dna = dna",
"def coding_strand_to_AA(dna):\n amino_acid=\"\"\n for i in range(0, len(dna), 3):\n mycodon=dna[i:i+3]\n # print'this is my codon'\n #print mycodon\n for j in range(len(codons)):\n for k in range(len(codons[j])):\n #print codons[j][k]\n if codons[j][k] == mycodon:\n #print aa[j]\n amino_acid += aa[j]\n return amino_acid\n \n #step uno break apart string into groups of three\n #find sequence +find index\n #then connect to amino acids ",
"def is_aligned_dna(sequence):\r\n #ensure that the given sequence is uppercase\r\n sequence = sequence.upper()\r\n \r\n #replace all A C G and T and compare length with 0\r\n if len(sequence.replace(\"A\", \"\").replace(\"C\", \"\").replace(\"G\",\"\").replace(\"T\",\"\").replace(\"-\",\"\")) == 0:\r\n return True\r\n else:\r\n return False",
"def test_to_rna(self):\n r = self.DNA(\"UCA\")\n self.assertEqual(str(r), \"TCA\")\n self.assertEqual(str(r.to_rna()), \"UCA\")",
"def dnasequence(self):\n return parseSingleFasta(open(self.dna_file).readlines())[1]"
] | [
"0.73271704",
"0.68954426",
"0.6802941",
"0.6800046",
"0.67086923",
"0.6665731",
"0.6584737",
"0.65618503",
"0.6433904",
"0.6413777",
"0.6383003",
"0.6311124",
"0.61830586",
"0.6149528",
"0.61370975",
"0.6119347",
"0.61098856",
"0.61098856",
"0.61098856",
"0.61098856",
"0.61098856",
"0.61098856",
"0.61098856",
"0.61098856",
"0.61098856",
"0.6060117",
"0.6029381",
"0.60219604",
"0.5986268",
"0.5974198"
] | 0.6965354 | 1 |
Sequence to_fasta() should return Fasta-format string | def test_to_fasta(self):
even = "TCAGAT"
odd = even + "AAA"
even_dna = self.SequenceClass(even, name="even")
odd_dna = self.SequenceClass(odd, name="odd")
self.assertEqual(even_dna.to_fasta(), ">even\nTCAGAT\n")
# set line wrap to small number so we can test that it works
self.assertEqual(even_dna.to_fasta(block_size=2), ">even\nTC\nAG\nAT\n")
self.assertEqual(odd_dna.to_fasta(block_size=2), ">odd\nTC\nAG\nAT\nAA\nA\n")
# check that changing the linewrap again works
self.assertEqual(even_dna.to_fasta(block_size=4), ">even\nTCAG\nAT\n") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_to_fasta(self):\n even = \"TCAGAT\"\n odd = even + \"AAA\"\n even_dna = self.SEQ(even, name=\"even\")\n odd_dna = self.SEQ(odd, name=\"odd\")\n self.assertEqual(even_dna.to_fasta(), \">even\\nTCAGAT\\n\")\n # set line wrap to small number so we can test that it works\n self.assertEqual(even_dna.to_fasta(block_size=2), \">even\\nTC\\nAG\\nAT\\n\")\n self.assertEqual(odd_dna.to_fasta(block_size=2), \">odd\\nTC\\nAG\\nAT\\nAA\\nA\\n\")\n # check that changing the linewrap again works\n self.assertEqual(even_dna.to_fasta(block_size=4), \">even\\nTCAG\\nAT\\n\")",
"def to_string(fasta):\n\n # remove header\n fasta_nh = fasta.readlines()[1:]\n\n # make into single string\n fasta_str = ''.join(fasta_nh)\n\n # remove newline characters\n seq = fasta_str.replace(\"\\n\", \"\")\n\n return seq",
"def make_fasta(data):\n result = data\n if not data.startswith(\">\"):\n result = \"\"\n cnt = 1\n for line in data.split('\\n'):\n if line:\n result += \">seq{}\\n\".format(cnt)\n result += line\n result += \"\\n\"\n cnt += 1\n return result.strip()",
"async def generate_sequence_fasta(db, sequence_id):\n sequence = await db.sequences.find_one(sequence_id, [\"sequence\", \"otu_id\", \"isolate_id\"])\n\n if not sequence:\n raise virtool.errors.DatabaseError(\"Sequence does not exist\")\n\n otu_name, isolate_name = await get_otu_and_isolate_names(db, sequence[\"otu_id\"], sequence[\"isolate_id\"])\n\n fasta = format_fasta_entry(\n otu_name,\n isolate_name,\n sequence_id,\n sequence[\"sequence\"]\n )\n\n return format_fasta_filename(otu_name, isolate_name, sequence[\"_id\"]), fasta",
"def Seq2fasta(idsSeqs):\n for data in idsSeqs:\n if data[0] != '':\n print(\">\" + data[0], end = '\\n')\n tmp = 0\n for c in range(len(data[1])+1):\n if data[1] == '':\n break \n else:\n if c % 60 == 0 and c != 0:\n print(data[1][tmp:c] + '')\n tmp = c\n elif c == len(data[1]): \n print(data[1][tmp:] + '')\n break\n else:\n break",
"def generate_fasta_single(seq_file, rfam_acc, out_dir):\n\n sequence = ''\n fp_out = None\n seq_bits = None\n\n # logging sequences not exported\n # rename this to family log\n log_file = os.path.join(out_dir, rfam_acc + \".log\")\n logging.basicConfig(\n filename=log_file, filemode='w', level=logging.INFO)\n\n # connect to db\n cnx = RfamDB.connect()\n\n # get a new buffered cursor\n cursor = cnx.cursor(raw=True)\n\n # fetch sequence accessions for specific family - significant only!!\n query = (\"SELECT fr.rfam_acc, fr.rfamseq_acc, fr.seq_start, fr.seq_end, rf.description\\n\"\n \"FROM full_region fr, rfamseq rf\\n\"\n \"WHERE fr.rfamseq_acc=rf.rfamseq_acc\\n\"\n \"AND fr.is_significant=1\\n\"\n \"AND fr.rfam_acc=\\'%s\\'\") % (rfam_acc)\n\n # execute the query\n cursor.execute(query)\n\n # open a new fasta output file\n fp_out = gzip.open(\n os.path.join(out_dir, str(rfam_acc) + \".fa.gz\"), 'w')\n\n for region in cursor:\n\n cmd = \"esl-sfetch -c %s/%s %s %s\" % (str(region[START]), str(region[END]),\n seq_file, str(region[SEQ_ACC]))\n\n proc = subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE)\n\n seq = proc.communicate()[0]\n\n # get sequence\n sequence = ''\n seq_bits = seq.split('\\n')[1:]\n sequence = sequence.join(seq_bits)\n\n # print sequence\n\n if sequence != '' and seq_validator(sequence) is True:\n # write header\n fp_out.write(\">%s/%s-%s %s\\n\" % (str(region[SEQ_ACC]),\n str(region[START]),\n str(region[END]),\n str(region[DESC])))\n\n # write sequence\n fp_out.write(sequence + '\\n')\n\n else:\n # logging sequences that have not been exported\n logging.info(str(region[SEQ_ACC]))\n\n # close last file\n fp_out.close()\n\n # disconnect from DB\n cursor.close()\n RfamDB.disconnect(cnx)",
"def fasta_format(self, line_width=None):\n return fasta_formatted_string(self.name, self._sequence,\n description=self.description,\n line_width=line_width)",
"def format_fasta(title, sequence):\n fasta_width = 70 # Number of characters in one line\n\n n_lines = 1 + len(sequence) // fasta_width # Number of lines\n\n lines = [ sequence[i*fasta_width: (i+1)*fasta_width] for i in range(n_lines)]\n lines = \"\\n\".join(lines)\n \n formatted = f\"> {title}\\n{lines}\\n\\n\"\n return formatted",
"def df2fasta(df, fn, sep='.', columns=None):\n if columns is None:\n columns = list(df.columns)\n if 'seq' in columns:\n columns.remove('seq')\n with open(fn, 'w') as fh:\n for ind, row in df.iterrows():\n label = '>%s' % ind\n for col in columns:\n label += '%s%s' % (sep, row[col])\n fh.write('%s\\n' % label)\n fh.write('%s\\n' % row['seq'])",
"def generate_fasta(sequences, fasta_path):\n\n with open(fasta_path, 'w+') as f:\n for i in range(len(sequences)):\n f.write('>seq '+str(i))\n f.write('\\n')\n f.write(sequences[i])\n f.write('\\n')",
"def transeq(seq):\n \n temp_file = 'PATH/TO/ROOT/Database_Outputs/temp.fasta'\n temp = open(temp_file, 'w')\n temp.write(\">Just a formality \\n\"+seq)\n temp.close()\n \n trans = \"PATH/TO/ROOT/BLISTR_support_programs/./transeq -sequence \"+temp_file+\" -outseq \"+temp_file[:-6]+\".faa\"\n proc = subprocess.Popen(trans, shell=True)\n proc.wait()\n \n temp = open(temp_file[:-6]+\".faa\", 'r')\n new_seq = \"\"\n for line in temp:\n if line.startswith(\">\"):\n continue\n new_seq += line\n \n os.remove(temp_file)\n os.remove(temp_file[:-6]+\".faa\")\n \n return new_seq",
"def test_fasta(self):\n aln2fasta = hhsuite.AlignmentToFasta()\n self.assertEqual(\n aln2fasta.fasta(self.hit, \"A-E----\"),\n \">Query\\nJKLMNOP\\n>Template\\nA-E----\\n\")",
"def fasta2align(fn,uniqueIndex=True):\n return fasta2df(fn, sep=None, columns=['name'], index='name', uniqueIndex=uniqueIndex).seq",
"def test_make_fasta_rec(self):\r\n header = '>E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0'\r\n seq = 'CTGGTC'\r\n qual = map(int, '32 32 32 19 19 19'.split())\r\n self.assertEqual(make_fastq_rec(header, seq, qual),\r\n \"\"\"@E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0\r\nCTGGTC\r\n+E2_1 FYI2DSB01B17QJ orig_bc=ATCACTAGTCAC new_bc=ATCACTAGTCAC bc_diffs=0\r\nAAA444\"\"\")",
"def coding_strand_to_AA(dna):\n #inital conditions\n protein = ''\n i = 0\n\n #for the length of DNA, translate each codon in an ORF to an amino acid\n while i < (len(dna)-2):\n codon = dna[i:i+3] \n amino_acid = aa_table[codon]\n protein= protein + amino_acid\n i += 3\n\n #return the string of amino acids\n return protein",
"def convertFastqToFasta(inputFastq, outputFasta):\n out = open(outputFasta, \"w\")\n for (titleStr, seqStr, qualityStr) in FastqIterator(inputFastq):\n out.write(\">%s\\n%s\\n\" % (titleStr, seqStr))",
"def fasta_from_sequences(seqs, make_seqlabel=None, line_wrap=None):\n warnings.warn(\n \"`fasta_from_sequences` is deprecated and will be removed in \"\n \"scikit-bio 0.3.0. Please update your code to use `skbio.io.write`.\",\n DeprecationWarning)\n\n fasta_list = []\n for i, seq in enumerate(seqs):\n # Check if it has a label, or one is to be created\n label = str(i)\n if make_seqlabel is not None:\n label = make_seqlabel(seq)\n elif hasattr(seq, 'id') and seq.id:\n label = seq.id\n elif hasattr(seq, 'Label') and seq.Label:\n label = seq.Label\n elif hasattr(seq, 'Name') and seq.Name:\n label = seq.Name\n\n # wrap sequence lines\n seq_str = str(seq)\n if line_wrap is not None:\n numlines, remainder = divmod(len(seq_str), line_wrap)\n if remainder:\n numlines += 1\n body = [seq_str[j * line_wrap:(j + 1) * line_wrap]\n for j in range(numlines)]\n else:\n body = [seq_str]\n\n fasta_list.append('>' + label)\n fasta_list += body\n\n return '\\n'.join(fasta_list)",
"def format_fasta(name, seq, wrap=60):\n return \">{}\\n{}\".format(name, textwrap.fill(seq, width=wrap))",
"def toString(self, format_='fasta', structureSuffix=':structure'):\n if format_ == 'fasta':\n return '>%s\\n%s\\n>%s%s\\n%s\\n' % (\n self.id, self.sequence, self.id, structureSuffix,\n self.structure)\n else:\n raise ValueError(\"Format must be 'fasta'.\")",
"def pdb_to_fasta(pdb_input):\n p = PDBParser(PERMISSIVE=1)\n structure = p.get_structure(pdb_input, pdb_input)\n file_name = pdb_input[0:-4] + \".fasta\"\n fasta_file = open(file_name, 'w')\n for model in structure:\n for chain in model:\n seq = list()\n chainID = chain.get_id()\n\n for residue in chain:\n if is_aa(residue.get_resname(), standard=True):\n seq.append(three_to_one(residue.get_resname()))\n else:\n seq.append(\"X\")\n chain_line = \">Chain_\" + chainID + \"\\n\" + str(\"\".join(seq)) + \"\\n\" + \"\\n\"\n fasta_file.write(chain_line)\n\n fasta_file.close()",
"def align2fasta(align, fn, applyPadding = True):\n align = padAlignment(align, applyPadding)\n\n with open(fn, 'w') as fh:\n for i in np.arange(align.shape[0]):\n ind = align.index[i]\n fh.write('>%s\\n' % ind)\n fh.write('%s\\n' % align.iloc[i])",
"def translateORFtoAAs(self,sequence,number):\r\n AAStringfromORF = str()\r\n startingM = int()\r\n for i in range(0,len(sequence)-2,3):\r\n if sequence[i:i+3] != \"AUG\":\r\n pass\r\n else:\r\n startingM = i\r\n for i in range(startingM,len(sequence)-2,3):\r\n x = self.tabletoTranslate(sequence[i:i+3])\r\n AAStringfromORF+=x\r\n if x == \"-\":\r\n self.listofSequences.append(AAStringfromORF.rstrip(\"-\").lstrip().rstrip())\r\n AAStringfromORF = str()\r\n break",
"def fetch_as_fasta(chrom,start,end,gindex,fname):\n \n # Print the sequence in fasta format.\n header = '>%s:%s-%s' % (chrom, start, end)\n fname.write('%s\\n%s\\n' % (header, gindex[chrom][start:end]))",
"def gff2FA(annotation, sequence, windows, output):\n df_gff = pd.read_csv(annotation, index_col=False, sep='\\t', header=None, comment=\"#\")\n df_gff.columns = ['seqname', 'source', 'feature', 'start', 'end', 'score', 'strand', 'frame', 'attribute']\n fasta_seq = SeqIO.parse(sequence, 'fasta')\n buffer_seqs = []\n cont = 0\n for record in fasta_seq:\n print(record.id)\n dff_extract = df_gff[df_gff.seqname == record.id]\n for key,val in dff_extract.iterrows():\n clean_seq = ''.join(str(record.seq).splitlines())\n if int(val.start) - windows < 0:\n start = 0\n else:\n start = int(val.start) - windows\n if int(val.end) + windows > len(clean_seq):\n end = len(clean_seq)\n else:\n end = int(val.end) + windows\n new_seq = clean_seq[start:end]\n att = val.attribute\n id = record.id + '_' + str(start) + '_' + str(end)\n desc = \"seq_id:\" + str(record.id)\n desc += \" feature_start:\" + str(val.start)\n desc += \" feature_end:\" + str(val.end)\n desc += \" genome_start:\" + str(start)\n desc += \" genome_end:\" + str(end)\n desc += \" feature:\" + str(val.feature)\n desc += \" attributes:\" + val.attribute\n seq = SeqRecord(Seq(new_seq), id=id, description=desc)\n buffer_seqs.append(seq)\n cont += 1\n if output:\n print('Saving...')\n SeqIO.write(buffer_seqs, output, \"fasta\")\n else:\n return buffer_seqs",
"def generate_fasta(seq_file, out_dir):\n\n LOGGER.info(\"Generating fasta file\", seq_file)\n\n sequence = ''\n fp_out = None\n seq_bits = None\n\n # logging sequences not exported\n # rename this to family log\n log_file = os.path.join(out_dir, \"missing_seqs.log\")\n logging.basicConfig(filename=log_file, filemode='w', level=logging.INFO)\n\n cnx = RfamDB.connect()\n cursor = cnx.cursor(raw=True)\n\n # fetch clan specific family full_region data and sequence description\n query = (\"SELECT fr.rfam_acc, fr.rfamseq_acc, fr.seq_start, fr.seq_end, rf.description\\n\"\n \"FROM full_region fr, rfamseq rf\\n\"\n \"WHERE fr.rfamseq_acc=rf.rfamseq_acc\\n\"\n \"AND fr.is_significant=1\\n\"\n \"ORDER BY fr.rfam_acc\")\n\n cursor.execute(query)\n\n for region in cursor:\n\n # new family\n if str(region[RFAM_ACC]) != rfam_acc:\n # check if there's no open file\n if fp_out is not None:\n fp_out.close()\n\n # open new fasta file\n fp_out = gzip.open(\n os.path.join(out_dir, str(region[RFAM_ACC]) + \".fa.gz\"), 'w')\n\n rfam_acc = region[RFAM_ACC]\n\n cmd = \"esl-sfetch -c %s/%s %s %s\" % (str(region[START]), str(region[END]),\n seq_file, str(region[SEQ_ACC]))\n\n proc = subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE)\n\n seq = proc.communicate()[0]\n\n # get sequence\n sequence = ''\n seq_bits = seq.split('\\n')[1:]\n sequence = sequence.join(seq_bits)\n\n # print sequence\n\n if sequence != '' and seq_validator(sequence) is True:\n # write header\n fp_out.write(\">%s/%s-%s %s\\n\" % (str(region[SEQ_ACC]),\n str(region[START]),\n str(region[END]),\n str(region[DESC])))\n\n # write sequence\n fp_out.write(sequence + '\\n')\n\n else:\n # logging sequences that have not been exported\n logging.info(sequence)\n\n # close last file\n fp_out.close()\n\n # disconnect from DB\n cursor.close()\n RfamDB.disconnect(cnx)",
"def get_fasta(self):\n\t\tif not self.fastas:\n\t\t\treturn None\n\t\telif self.fastas.get('twodirections') is not None:\n\t\t\treturn self.fastas.get('twodirections')\n\t\telif self.fastas.get('template') is not None:\n\t\t\treturn self.fastas.get('template')\n\t\telif self.fastas.get('complement') is not None:\n\t\t\treturn self.fastas.get('complement')",
"def __str__(self):\n st=\"\"\n for g in self:\n st+=g.fasta()\n st+=\"\\n\"\n return st",
"def fast_Q2A(fastq_filepath):\n filein = open(fastq_filepath, \"r\")\n fileout = open(fastq_filepath[:-5] + \"fasta\", \"w\")\n found_id = 0\n num_of_seqs = 0\n for i in filein:\n if i[0] == \"@\":\n seq_id = \">\" + i[1:]\n found_id = 1\n num_of_seqs += 1\n continue\n if found_id == 1:\n seq = i\n found_id = 0\n fileout.write(seq_id + seq)\n filein.close()\n fileout.close()\n print num_of_seqs\n return os.path.abspath(fileout.name)",
"def getSequence(ref, fasta):\n\n fasta_header = \"\"\n\n fh_fasta = open(fasta, \"r\")\n entry = (x[1] for x in groupby(fh_fasta, lambda line: line[0] == \">\"))\n\n for header in entry:\n headerStr = header.__next__()[1:].strip()\n\n seq = \"\".join(s.strip() for s in entry.__next__())\n\n if ref == headerStr.replace('>',''):\n filename = os.path.join(os.getcwd(), ref.replace('/','_').split('|')[0])\n fasta_header = replace_char(headerStr)\n\n with open(filename + '.fa', \"w\") as output_file:\n output_file.write(\">\" + fasta_header + \"\\\\n\" + seq.upper() + \"\\\\n\")\n\n fh_fasta.close()\n return fasta_header",
"def read_fasta_sequences_to_str(filename):\n with open(filename) as f:\n lines = [line.strip() for line in f.readlines()]\n sequences = []\n text = ''\n\n for line in lines:\n if line[0] == '>':\n if len(text) > 0:\n sequences.append(text)\n text = ''\n else:\n if len(line):\n text += line\n if len(text) > 0:\n sequences.append(text)\n\n return sequences"
] | [
"0.74317557",
"0.7302847",
"0.70808065",
"0.6741352",
"0.67153966",
"0.67040503",
"0.6695643",
"0.66434294",
"0.66212684",
"0.6602914",
"0.6531266",
"0.65176135",
"0.65008026",
"0.6492443",
"0.648414",
"0.64614666",
"0.638201",
"0.6333177",
"0.6327045",
"0.62993723",
"0.62658256",
"0.622666",
"0.62186885",
"0.62136865",
"0.62016344",
"0.6164991",
"0.6160582",
"0.6150265",
"0.6123191",
"0.6081215"
] | 0.74931866 | 0 |
Sequence to_phylip() should return one-line phylip string | def test_to_phylip(self):
s = self.SequenceClass("ACG", name="xyz")
self.assertEqual(s.to_phylip(), "xyz" + " " * 27 + "ACG") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _toPhylip(seq, width=None, name=None):\n\n if name is None:\n name = seq.name\n\n output = '%-10s%s' % (name[:10], seq.seq)\n\n if width:\n output = textwrap.fill(output, width)\n\n return output",
"def __str__(self):\n A, b = self.A, self.b\n A_rows = str(A).split('\\n')\n n_rows = len(A_rows)\n # column vector from `b`, if not already one\n b_col = b.reshape(b.shape[0], 1) if len(b.shape) == 1 else b\n b_rows = str(b_col).split('\\n')\n # place an \"x\" somewhere near the middle\n x_row = int((n_rows - 1) / 2) # where \"x\" is shown\n above = x_row\n below = (n_rows - x_row - 2)\n spacer = ' | '\n last_middle = [spacer[1:]] if n_rows > 1 else []\n middle = (\n above * [spacer]\n + [' x <= ']\n + below * [spacer]\n + last_middle)\n assert len(middle) == n_rows, (middle, n_rows)\n # format lines\n lines = [A_rows[k] + middle[k] + b_rows[k]\n for k in range(n_rows)]\n output = 'Single polytope \\n {lines}\\n'.format(\n lines='\\n '.join(lines))\n return output",
"def fastaToPhylip(self, records):\n SeqIO.write(records, self.newPhylip, 'phylip') # Writes a new .phy file containing the SeqRecord for specific gene",
"def to_motevo(self):\n m = \"//\\n\"\n m += \"NA {}\\n\".format(self.id)\n m += \"P0\\tA\\tC\\tG\\tT\\n\"\n for i, row in enumerate(self.pfm):\n m += \"{}\\t{}\\n\".format(i, \"\\t\".join([str(int(x)) for x in row]))\n m += \"//\"\n return m",
"def make_lexicon_txt(self):\n raise NotImplementedError",
"def __repr__(self):\n s = ''\n no = self.getRaiz()\n s += str(no.getPai()) + '\\n'\n s += '^' + '\\n'\n s += str(no.getEsquerdo()) + ' <- '\n s += str(no.getDado()) + ' -> '\n s += str(no.getDireito())\n return s",
"def __str__(self):\n st=\"\"\n for g in self:\n st+=g.fasta()\n st+=\"\\n\"\n return st",
"def tikzcode(self):\n tex = \"\"\n tex += r\"\\draw\"\n if len(self.options):\n options = ', '.join(self.options)\n tex += \"[{options}] \".format(options=options)\n tex += \"({a.xpos:.4f},{a.ypos:.4f}) \".format(a=self.node_a)\n tex += \"to\"\n # if the nodes are arranged, then they have angle in/out\n inout = []\n inout.append('out={angle!s}'.format(angle=self.node_a.angle_inout))\n inout.append('in={angle!s}'.format(angle=self.node_b.angle_inout))\n if inout:\n tex += \"[\" + \", \".join(inout) + \"] \"\n tex += \"({b.xpos:.4f},{b.ypos:.4f})\".format(b=self.node_b)\n tex += \";\\n\"\n return tex",
"def __repr__(self):\n return \"{}: {}\".format(self.nodeid, self.lemma)",
"def __repr__(self):\n hex_str = binascii.hexlify(bytes(self)).decode()\n strout = \"\"\n first = True\n for index in range(0, 28, 2):\n if first:\n first = False\n else:\n strout = strout + \".\"\n strout = strout + hex_str[index : index + 2]\n return strout",
"def __str__(self):\n s = ''\n for node in self.nodes:\n s += '\\n\\n'+str(node)+'\\n\\t'\n edges = node.getChildren()\n keys = edges.keys()\n keys.sort()\n for key in keys:\n bounds = edges[key].getSuffix()\n s += str(edges[key])+' '\n for i in xrange(bounds[0], bounds[1]):\n s += self.target[i]\n s += '\\n\\t'\n return s",
"def __str__(self):\n output = \"0->\"\n for c in self.route:\n output += str(c) + \"->\"\n output += \"0\"\n return output",
"def __str__(self):\n temp = self.__head\n ss = []\n while temp is not None:\n ss.append(str(temp.data))\n temp = temp.next_node\n return ('\\n'.join(ss))",
"def __str__(self):\n\n result = \"\"\n\n temp = self.head\n while temp is not None:\n result += str(temp.data) + \" -> \"\n temp = temp.next\n\n return result[0:-4]",
"def latex(self):\n from nodepy.snp import printable\n sep = ' & '\n s= r'\\begin{align}' + '\\n'\n s+=r'\\begin{array}{c|'\n s+='c'*len(self)\n s+='}\\n'\n for i in range(len(self)):\n s+=printable(self.c[i]) + sep\n s += sep.join([printable(aij) for aij in self.A[i,:]])\n s+=r'\\\\' + '\\n'\n s+=r'\\hline' + '\\n'\n s += sep\n s += sep.join([printable(bj) for bj in self.b])\n s+=r'\\\\' + '\\n'\n if hasattr(self,'bhat'):\n s += sep\n s += sep.join([printable(bj) for bj in self.bhat])\n s += '\\n'\n s += r'\\end{array}' + '\\n'\n s += r'\\end{align}'\n s=s.replace('- -','')\n return s",
"def __str__(self):\n tapeline = self.tape.format(\n self.index - 10, self.index + 11) + ' : state {}'.format(self.state)\n pointline = ' ' * 10 + '^' + ' ' * 11 + \\\n ' : index {}'.format(self.index)\n\n return tapeline + '\\n' + pointline",
"def skbio2phylo(treenode, format=\"newick\"):\n with tempfile.NamedTemporaryFile(delete=True, mode=\"w\") as tempf:\n treenode.write(tempf.name, format)\n tempf.flush()\n return Phylo.read(tempf.name, format)",
"def make_to_string(front, mid, back, empty_repr):\n \"*** YOUR CODE HERE ***\"\n def printer(lnk):\n if lnk == Link.empty:\n return empty_repr\n else:\n return front + str(lnk.first) + mid + printer(lnk.rest) + back\n return printer",
"def __init__(self, is_p1_turn: bool, side_length: int) -> None:\n super().__init__(is_p1_turn)\n self.side_length = side_length\n # ISSUE: what if node is more than 26 --> no need to handle side more than 5\n # construct a list of uppercase and lower case letters\n alph_lst_upper = list(string.ascii_uppercase)\n alph_lst_lower = list(string.ascii_lowercase)\n # alph_lst has a length of 52\n alph_lst = alph_lst_upper + alph_lst_lower\n\n # assign original value for each ley-line\n hori_result = []\n for i in range(side_length + 1):\n hori_result.append(\"@\")\n left_result = []\n for i in range(side_length + 1):\n left_result.append(\"@\")\n right_result = []\n for i in range(side_length + 1):\n right_result.append(\"@\")\n self.hori_result = hori_result\n self.left_result = left_result\n self.right_result = right_result\n\n self.hori_lst = []\n self.left_lst = []\n self.right_lst = []\n\n # construct horizontal ley-lines\n n = 2\n start_index = 0\n end_index = 0\n while n <= side_length + 1:\n end_index = start_index + n\n self.hori_lst.append(alph_lst[start_index:end_index])\n start_index = end_index\n n += 1\n end_index = start_index + side_length\n self.hori_lst.append(alph_lst[start_index:end_index])\n\n # copy hori_lst\n hori_copy = []\n for item in self.hori_lst:\n hori_copy.append(item)\n\n # construct left ley-lines\n for i in range(side_length + 1):\n temp = []\n for lst in hori_copy[:len(hori_copy) - 1]:\n if len(lst) > i:\n temp.append(lst[i])\n self.left_lst.append(temp)\n for i in range(1, side_length + 1):\n self.left_lst[i].append(hori_copy[-1][i - 1])\n\n # construct right ley-lines\n for i in range(-1, side_length * (-1) - 2, -1):\n temp = []\n for lst in hori_copy[:len(hori_copy) - 1]:\n if len(lst) >= i * (-1):\n temp.append(lst[i])\n self.right_lst.append(temp)\n self.right_lst = self.right_lst[::-1]\n for i in range(side_length):\n self.right_lst[i].append(hori_copy[-1][i])",
"def __repr__(self):\n return self.matrix and '\\n'.join([\"|%s|\" % s for s in [' '.join([\"%-6.3f\" % e for e in w]) for w in self.matrix]]) or \"<pusta macierz>\"",
"def __repr__(self):\n st = '\\nProof(syntax=\\\"' + self.syntax + '\\\", formula_list=[\\n'\n for l in self.proof[:-1]:\n st += str(l) + ',\\n'\n return st + str(self.proof[-1]) + '])'",
"def __repr__(self):\n\n nodes = []\n current = self.head\n\n while current:\n if current is self.head:\n nodes.append('[Head: %s]' % current.data)\n elif current.next_node is None:\n nodes.append('[Tail: %s]' % current.data)\n else:\n nodes.append('[%s]' % current.data)\n current = current.next_node\n\n return '-> '.join(nodes)",
"def __repr__(self):\n lines = []\n nodes = [(self.root, 0)]\n while nodes:\n node, indent = nodes.pop()\n name = str(node) if node else 'None'\n lines.append(' ' * indent + name)\n if node:\n nodes.append((node.child[True], indent + 1))\n nodes.append((node.child[False], indent + 1))\n\n return os.linesep.join(lines)",
"def generate_aa_sequence_for_disp(aa_seq):\n return re.sub(\"(.{50})\", \"\\\\1\\n\", aa_seq, 0, re.DOTALL)",
"def __str__(self):\n stubs = ['' for _ in range(self.nChildren())]\n label = dist = ''\n for i in range(self.nChildren()):\n stubs[i] = str(self.children[i])\n if self.dist or self.dist == 0.0:\n dist = ':' + str(self.dist)\n if self.label != None:\n label = str(self.label)\n if self.nChildren() == 0:\n return label + dist\n else:\n stubstr = '('\n for i in range(len(stubs) - 1):\n stubstr += stubs[i] + ','\n return stubstr + stubs[-1] + ')' + label + dist\n # there is no label\n '''\n if not self.left and self.right:\n return ',' + right\n elif self.left and not self.right:\n return left + ','\n elif self.left and self.right:\n return '(' + left + ',' + right + ')' + dist\n '''",
"def __str__(self):\n return \"->\".join([str(n.data) for n in self.as_list()])",
"def pack_ascii(self):\n\n out = ''\n for w in sorted(self.all_words()):\n assert isinstance(self.value[w], LOTHypothesis), \"*** Can only pack Lexicons with FunctionNode values\"\n out += \"%s:%s;\" % (w, self.value[w].grammar.pack_ascii(self.value[w].value) )\n return out",
"def __repr__(self):\n text = str(self)\n text_ellips = (text[:31] + \"..\" + text[-31:]) if len(text) > 64 else text\n return f\"{self.element_id}. {text_ellips}...\"",
"def dummy_junction13():\n return 'junction:chr1:176-299:+'",
"def _build_nat_string(self, output_interface: str) -> str:\n rule_template = [\n \"POSTROUTING\",\n \"-m\",\n \"mark\",\n \"--mark\",\n f\"{self._mark}\",\n \"-o\",\n output_interface,\n \"-j\",\n \"MASQUERADE\",\n ]\n\n return \" \".join(rule_template)"
] | [
"0.67031217",
"0.590137",
"0.5797911",
"0.5694089",
"0.56807077",
"0.5645143",
"0.55652755",
"0.556261",
"0.5523871",
"0.5515493",
"0.5506121",
"0.550052",
"0.5490149",
"0.54742414",
"0.5463294",
"0.5462326",
"0.54445654",
"0.54246205",
"0.5416303",
"0.54102886",
"0.54089516",
"0.53965",
"0.5371097",
"0.53703153",
"0.53696406",
"0.53609675",
"0.5357852",
"0.5339264",
"0.5330368",
"0.5328892"
] | 0.73432875 | 0 |
gapped sequence nongaps() should return correct array | def test_nongaps(self):
sc = self.SequenceClass
self.assertEqual(sc("TC").nongaps(), array([1, 1]))
self.assertEqual(sc("T-").nongaps(), array([1, 0])) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_gaps(self):\n sc = self.SequenceClass\n self.assertEqual(sc(\"TC\").gaps(), array([0, 0]))\n self.assertEqual(sc(\"T-\").gaps(), array([0, 1]))",
"def findGaps(sequence):\n patt = re.compile(r\"[Nn]+\")\n for match in patt.finditer(sequence):\n yield (match.start(), match.end())",
"def test_includinggaps(self):\n model = substitution_model.TimeReversibleNucleotide(model_gaps=True)\n assert len(model.get_alphabet()) == 5",
"def get_gaps( rows ):\n\n n = len(rows) - 1\n gaps = [ rows[i+1][0]-rows[i][1] for i in range(n) ]\n return gaps",
"def test_excludeinggaps(self):\n model = substitution_model.TimeReversibleNucleotide(model_gaps=False)\n assert len(model.get_alphabet()) == 4",
"def schrage_nlogn(data):\n N = data.copy()\n for i in range(len(data)):\n N[i] = (N[i][0], N[i])\n heapq.heapify(N)\n \"\"\"\"\n mozna to zaltwic przy wczytaniu danych nie wplywa na zloznosc samego algorytmu\n \n N to tablica tablica krotek takich że (r , [r, p,q]), (r1, [r1 ,p1 , q1]) ........\n heapq sortuje po pierwszym elemncie dlatego tak\n \n G analogicznie z tym że sortowane jest malejaco po q więc G = [(q, [r, p ,q ]), (q1, [r1, p1, q1]) .......... ] \n \"\"\"\n G = []\n Pi = []\n t = N[0][0]\n start = timer()\n while len(G) != 0 or len(N) != 0:\n while len(N) != 0 and Schrage.save_min(N) <= t:\n e = heapq.heappop(N)\n heapq.heappush(G, (-e[1][2], e[1])) # O(log n)\n if len(G) != 0:\n e = heapq.heappop(G) # O(log n)\n Pi.append(e[1]) # O(1)\n t = t + e[1][1]\n else:\n t = N[0][0] # O(1)\n end = timer()\n executionTime = end - start\n return Pi, executionTime",
"def gaps(self):\n return self.gaps_L + self.gaps_R",
"def test_insert_gaps_order_invariant():\n gaps1 = insert_gaps(log)\n gaps2 = insert_gaps(log.iloc[[1,0]])\n\n get_gaps = lambda x: x[x['name'] == 'gap']['length'].reset_index(drop=True)\n assert (get_gaps(gaps1) == get_gaps(gaps2.iloc[::-1])).all()",
"def next_rgs(seq, n, k):\n # b[i] = max(seq[i - 1], b[0], ..., b[i - 1]) = max(seq[i - 1], b[i - 1])\n # All restricted growth sequences start with 0\n b = [0]\n result = seq[:]\n for i in range(1, n):\n b.append(max(seq[i - 1], b[i - 1]))\n # Find the earliest index when previous and next sequence are diverging\n for j in range(n - 1, 0, -1):\n if seq[j] + 1 > k:\n continue\n if seq[j] > b[j]:\n continue\n break\n # Create components of new result\n # prefix - maximal common prefix of original and new sequence\n prefix = seq[:j]\n # incremented - the value at j-th place that was incremented\n incremented = seq[j] + 1\n # suffix_length - how many nonzero numbers should we put at the end\n # of new sequence to make it restricted-growing\n # and to have all numbers 0..(k-1) in it.\n suffix_length = k - max(b[j], incremented)\n zeroes = [0] * (n - j - suffix_length - 1)\n suffix = list(range(k - suffix_length + 1, k + 1))\n # Construct new sequence\n result = prefix + [incremented] + zeroes + suffix\n return result",
"def extract_labeled_sequence_gaps(source_seq, test_seq):\n slot_vals = {} \n tmp_gap = []\n prev_word_pos = 0 # the temp value used as a key for the gaps\n pos_in_seq = 0 # position of source_seq of test_seq's current match\n for i, el in enumerate(test_seq):\n if (len(source_seq)-pos_in_seq > len(test_seq)-i) or (pos_in_seq == len(source_seq)):\n return {} \n if el == source_seq[pos_in_seq]:\n # match\n pos_in_seq += 1\n if pos_in_seq != 1 and len(tmp_gap) != 0:\n slot_vals[prev_word_pos] = tmp_gap\n tmp_gap = []\n prev_word_pos = i \n else:\n tmp_gap.append(el)\n if pos_in_seq == len(source_seq):\n return slot_vals\n return {}",
"def get_continous_time_periods(binary_array):\n binary_array = np.copy(binary_array).astype(\"int8\")\n n_times = len(binary_array)\n d_times = np.diff(binary_array)\n # show the +1 and -1 edges\n pos = np.where(d_times == 1)[0] + 1\n neg = np.where(d_times == -1)[0] + 1\n\n if (pos.size == 0) and (neg.size == 0):\n if len(np.nonzero(binary_array)[0]) > 0:\n return [(0, n_times-1)]\n else:\n return []\n elif pos.size == 0:\n # i.e., starts on an spike, then stops\n return [(0, neg[0])]\n elif neg.size == 0:\n # starts, then ends on a spike.\n return [(pos[0], n_times-1)]\n else:\n if pos[0] > neg[0]:\n # we start with a spike\n pos = np.insert(pos, 0, 0)\n if neg[-1] < pos[-1]:\n # we end with aspike\n neg = np.append(neg, n_times - 1)\n # NOTE: by this time, length(pos)==length(neg), necessarily\n h = np.matrix([pos, neg])\n # print(f\"len(h[1][0]) {len(h[1][0])} h[1][0] {h[1][0]} h.size {h.size}\")\n if np.any(h):\n result = []\n for i in np.arange(h.shape[1]):\n if h[1, i] == n_times-1:\n result.append((h[0, i], h[1, i]))\n else:\n result.append((h[0, i], h[1, i]-1))\n return result\n return []",
"def yieldNGEpairs(array):\n stack = container.Stack()\n \n def lessthan(element):\n \"\"\"prevent the top (> element) loss\"\"\"\n def predicate(top):\n if top < element:\n return True\n stack.push(top)\n return False\n return predicate\n \n for element in array:\n if stack.isempty() or stack.top() > element:\n stack.push(element)\n continue\n \n for top in itertools.takewhile(lessthan(element), stack.popall()):\n yield top, element\n stack.push(element)\n\n for top in stack.popall():\n yield top, None",
"def _get_broundary(arr, n_max=16, n_skip=3):\n sub_arr = np.array(arr[n_skip:n_max])\n diffs = sub_arr[1:] - sub_arr[:-1]\n return np.argmin(diffs) + n_skip + 1",
"def bps(seq, ps):\n n = len(list(seq))\n\n if isinstance(ps, int):\n assert n >= ps, \"Not enough sequence size to make break points\"\n return [s[-1] for s in nchunks(seq, ps)][:-1]\n assert n > len(ps), \"Not enough sequence size to make break points\"\n return [seq[round(n * p) - 1] for p in ps]",
"def _sample_pairs_nbp(data, frac, size_cap=np.int(1e6)):\n sample_size = int(len(data) * (len(data) - 1) / 2 * frac)\n sample_size = min(sample_size, size_cap)\n pairs = np.empty((sample_size, 2))\n for i in numba.prange(sample_size):\n pair = np.random.choice(data, size=2, replace=False)\n pair.sort()\n pairs[i] = pair\n return pairs",
"def _no_repeat_ngram(self, tokens, lprobs, bsz: int, beam_size: int, step: int):\n banned_tokens = [\n torch.jit.annotate(List[int], []) for bbsz_idx in range(bsz * beam_size)\n ]\n if step + 2 - self.no_repeat_ngram_size >= 0:\n cpu_tokens: List[List[int]] = tokens.cpu().tolist()\n check_start_pos = step + 2 - self.no_repeat_ngram_size\n for bbsz_idx in range(bsz * beam_size):\n ngram_to_check = cpu_tokens[bbsz_idx][\n -(self.no_repeat_ngram_size - 1) :\n ]\n for i in range(check_start_pos):\n if (\n ngram_to_check\n == cpu_tokens[bbsz_idx][i : i + self.no_repeat_ngram_size - 1]\n ):\n banned_tokens[bbsz_idx].append(\n cpu_tokens[bbsz_idx][i + self.no_repeat_ngram_size - 1]\n )\n for bbsz_idx in range(bsz * beam_size):\n lprobs[bbsz_idx][\n torch.tensor(banned_tokens[bbsz_idx], dtype=torch.int64)\n ] = torch.tensor(-math.inf).to(lprobs)\n return lprobs",
"def break_ties(original):\"\n\n ranks, counts = np.unique(original, return_counts = True)\n cumcounts = np.cumsum(counts)\n \n n = int(np.prod([factorial(c) for c in counts])) # 2!*2!*1!\n out = np.zeros((n, len(original))) # Initialize\n\n for k, rank in enumerate(ranks):\n pos = np.argwhere(original == rank).flatten() \n cycle_perms = it.cycle(it.permutations(pos))\n for row in range(n): \n r = cumcounts[k - 1] if k > 0 else 0\n for p in next(cycle_perms):\n out[row, p] = r\n r += 1\n \n return out",
"def compute_leaps_fast(start, end):\n return compute_leaps_fast_from_0(end)-compute_leaps_fast_from_0(start)",
"def n(l):\n return np.array(l,dtype=object)",
"def initGD( X, N ):\n seq = np.ndarray(len(X), dtype=np.object)\n for i in range( len(X) ):\n a = np.floor(np.linspace(0,N-.00000001,len(X[i])))\n seq[i] = a\n return seq",
"def restricted_growth_sequences(n):\n # k - biggest value that should be contained in subsequence\n for k in range(n):\n # initially seq = [0, 0, ... 0, 1, 2, ..., k-1, k]\n seq = [0] * (n - k) + list(range(1, k+1))\n # final = [0, 1, 2, ..., k-1, k, ..., k]\n final = list(range(k)) + [k] * (n - k)\n while seq != final:\n yield seq\n seq = next_rgs(seq, n, k)\n yield final",
"def prime_gaps(maxp):\n P = prime_range(maxp + 1)\n return [P[i + 1] - P[i] for i in range(len(P) - 1)]",
"def check_gaps(matches, gap_threshold = 0):\n gaps = []\n prev = None\n for match in sorted(matches, key = itemgetter(0)):\n if prev is None:\n prev = match\n continue\n if match[0] - prev[1] >= gap_threshold:\n gaps.append([prev, match])\n prev = match\n return [[i[0][1], i[1][0]] for i in gaps]",
"def make_b_array(n):\n array = np.linspace(-3, 3, n)\n for i, x in enumerate(array[1:-1], start=1):\n if abs(x) < 1:\n array[i] = 2\n else:\n array[i] = 0\n array[0] = 0\n array[n-1] = 0\n\n return array",
"def lntn(sessions):\n mac_aps = defaultdict(list)\n MAC_IDX, AP_IDX = 0,3\n for s in sessions:\n mac, ap = s[MAC_IDX], s[AP_IDX]\n mac_aps[mac].append(ap)\n for k in mac_aps.keys():\n aps = mac_aps[k]\n random.shuffle(aps)\n mac_aps[k] = aps\n out = []\n for s in sessions:\n mac = s[MAC_IDX]\n out.append([s[0], s[1], s[2], mac_aps[mac].pop()])\n return out",
"def pipe_map(n_caps, n_refs, differential=False):\n res = np.array([0]*n_caps + [1]*(n_caps-1), dtype=int)\n res = np.array(list(res[ii:ii+n_caps] for ii in range(n_caps)))\n res = res[::-1, :]\n res = (tuple(res + ii for ii in range(n_refs-1))\n + (np.reshape([n_refs-1]*n_caps, (n_caps, 1,)),) )\n res = np.concatenate(res, axis=1)\n\n if differential:\n res = np.stack((res, res[:, ::-1],), axis=2)\n else:\n res = np.reshape(res, np.shape(res) + (1,))\n\n return res",
"def imputer(seq, n=500):\n cur = len(seq)\n if cur < n:\n return np.concatenate((seq, np.zeros(n - cur)))\n return seq[: n]",
"def naive_grouper(inputs, n):\n num_groups = len(inputs) // n\n return [tuple(inputs[i*n:(i+1)*n]) for i in range(num_groups)]",
"def bshift_1d_nb(a, n):\n out = np.empty_like(a, dtype=np.float_)\n out[-n:] = np.nan\n out[:-n] = a[n:]\n return out",
"def test_gap_array(self):\n r = self.RNA(\"-?A-?NRY-\")\n v = r.gap_array()\n self.assertEqual(v, array([1, 1, 0, 1, 1, 0, 0, 0, 1]))\n r = self.RNA(\"AC\")\n v = r.gap_array()\n self.assertEqual(v, array([0, 0]))\n r = self.RNA(\"-?\")\n v = r.gap_array()\n self.assertEqual(v, array([1, 1]))"
] | [
"0.61690015",
"0.6166339",
"0.5894974",
"0.58692527",
"0.58428276",
"0.57104343",
"0.5640709",
"0.554734",
"0.5539277",
"0.55391294",
"0.55129206",
"0.5431471",
"0.5401227",
"0.5352029",
"0.53116506",
"0.5305194",
"0.5293791",
"0.5287529",
"0.5271896",
"0.5259445",
"0.52539176",
"0.52415377",
"0.521012",
"0.5185087",
"0.51675045",
"0.5159283",
"0.51523376",
"0.5148039",
"0.51441616",
"0.51308405"
] | 0.7228265 | 0 |
gapped sequence regap() should return correct sequence | def test_regap(self):
sc = self.SequenceClass
self.assertEqual(str(sc("TC").regap(sc("A---A-"))), "T---C-") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_gaps(self):\n sc = self.SequenceClass\n self.assertEqual(sc(\"TC\").gaps(), array([0, 0]))\n self.assertEqual(sc(\"T-\").gaps(), array([0, 1]))",
"def group(seq):\n pass # replace with your solution",
"def check_gapped(sequence):\n w_regexp = re.compile('n|N')\n regexp_obj = w_regexp.search(sequence)\n if (regexp_obj):\n return True\n else:\n return False",
"def test_godel_number_to_sequence():\n\tassert godel_number_to_sequence(1) == ()\n\tassert godel_number_to_sequence(2) == (1,)\n\tassert godel_number_to_sequence(3) == (0, 1)\n\tassert godel_number_to_sequence(2250) == (1, 2, 3)",
"def xseq2yseq_singletrial(self,xseq):\n # sequence for ongoing task\n og_xseq = xseq[self.ntokens_pm:-1]\n og_xseq_roll = np.roll(og_xseq,self.nback)\n og_yseq = np.concatenate([\n np.zeros(self.nback),\n (og_xseq[self.nback:] == og_xseq_roll[self.nback:]).astype(int)])\n # determine final pm response\n pm_responses_encode = np.arange(2,2+self.ntokens_pm)\n pm_response_final = list(xseq).index(xseq[-1])+2\n # concatenate\n yseq = np.concatenate([\n pm_responses_encode,\n og_yseq,\n [pm_response_final]\n ])\n return yseq",
"def test_includinggaps(self):\n model = substitution_model.TimeReversibleNucleotide(model_gaps=True)\n assert len(model.get_alphabet()) == 5",
"def _setup_next_sequence(cls):\n return 0",
"def findGaps(sequence):\n patt = re.compile(r\"[Nn]+\")\n for match in patt.finditer(sequence):\n yield (match.start(), match.end())",
"def sequence_predict(self,sess,seq_xs,dropout,seqlen,seq_ys = []):\n if len(seq_ys) > 0:\n # since we have 1 sequence\n seq_ys = seq_ys[0,0:seqlen[0]]\n y = np.zeros([seqlen[0],2])\n y[np.arange(0,seqlen[0]),np.array(seq_ys,dtype=np.int32)] = 1\n\n \"\"\"cut spare entries of xs (added by the reader)\"\"\"\n seq_xs = seq_xs[0:seqlen[0],:]\n \n cost = -1\n if len(seq_ys) > 0:\n llogits, predictions,cost = sess.run( [self.logits, self.predictions,self.cost], feed_dict={self.x: seq_xs, self.y: y, \n self.keep_prob: dropout}) \n else:\n llogits,predictions = sess.run( [self.logits, self.predictions], feed_dict={self.x: seq_xs, \n self.keep_prob: dropout}) \n \n \n seq_prediction = np.sum(predictions) >= seqlen[0]/2.\n \n #if seq_ys is provided, then output also correct predictions\n corr_preds = []\n if len(seq_ys) > 0:\n corr_preds = (seq_ys[0] == seq_prediction)\n\n return np.sum(llogits,0),seq_prediction, [corr_preds], cost",
"def test_to_phylip(self):\n s = self.SequenceClass(\"ACG\", name=\"xyz\")\n self.assertEqual(s.to_phylip(), \"xyz\" + \" \" * 27 + \"ACG\")",
"def setseq():\n\n if seqRnd[0]:\n r = getrndseq(seqRndWeight)\n if seqRnd[0] == 1:\n gbl.seqCount = r\n r = -1\n else:\n r = -1\n\n return ( r, seqRnd[1:] )",
"def test_sequence(self):\n self.assertEqual([1, -3, 9, -27, 81, -243],\n [x for x in GeometricProgression(6, 1, -3)])\n\n self.assertEqual([1, 1, 1, 1, 1],\n [x for x in GeometricProgression(5, 1, 1)])\n\n self.assertEqual([4, 40, 400, 4000, 40000],\n [x for x in GeometricProgression(5, 4, 10)])",
"def gru_seq2seq_internal(inputs, targets, hparams, train):\n with tf.variable_scope(\"gru_seq2seq\"):\n if inputs is not None:\n inputs_length = common_layers.length_from_embedding(inputs)\n # Flatten inputs.\n inputs = common_layers.flatten4d3d(inputs)\n inputs = tf.reverse_sequence(inputs, inputs_length, seq_axis=1)\n _, final_encoder_state = gru(inputs, inputs_length, hparams, train,\n \"encoder\")\n else:\n final_encoder_state = None\n\n shifted_targets = common_layers.shift_right(targets)\n # Add 1 to account for the padding added to the left from shift_right\n targets_length = common_layers.length_from_embedding(shifted_targets) + 1\n decoder_outputs, _ = gru(\n common_layers.flatten4d3d(shifted_targets),\n targets_length,\n hparams,\n train,\n \"decoder\",\n initial_state=final_encoder_state)\n return tf.expand_dims(decoder_outputs, axis=2)",
"def test_sequence(self, output, input_):\n input_ = \"\\n\".join(input_)\n g = Genes(input_)\n s = Sequence(genes=g, ages=g.size)\n s.run()\n self.assertEquals(s.population.get_survivor(Sequence.IMPOSSIBLE),\n output)",
"def encode(self, seq):",
"def test_degap(self):\n sc = self.SequenceClass\n self.assertEqual(sc(\"T-\").degap(), sc(\"T\"))",
"def sequence(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['sequence']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n label = \"SEQ\"\n for t in ['C','L']:\n run_label = label+'_'+t\n t1Mag_label = '{0}1MAG'.format(t)\n t2Mag_label = '{0}2MAG'.format(t)\n t3Mag_label = '{0}3MAG'.format(t)\n t1Ang_label = '{0}1ANG'.format(t)\n t2Ang_label = '{0}2ANG'.format(t)\n t3Ang_label = '{0}3ANG'.format(t)\n distillate_label = \"{0}-ALL\".format(t)\n\n # header\n inigen.emit_run_header(run_label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_1Mag_label = t1Mag_label\n dep_1Mag_name = fields['deps'][0]\n dep_1Mag_uuid = self.uuid_map[t1Mag_label]\n\n dep_2Mag_label = t2Mag_label\n dep_2Mag_name = fields['deps'][1]\n dep_2Mag_uuid = self.uuid_map[t2Mag_label]\n\n dep_3Mag_label = t3Mag_label\n dep_3Mag_name = fields['deps'][2]\n dep_3Mag_uuid = self.uuid_map[t3Mag_label]\n\n dep_1Ang_label = t1Ang_label\n dep_1Ang_name = fields['deps'][3]\n dep_1Ang_uuid = self.uuid_map[t1Ang_label]\n\n dep_2Ang_label = t2Ang_label\n dep_2Ang_name = fields['deps'][4]\n dep_2Ang_uuid = self.uuid_map[t2Ang_label]\n\n dep_3Ang_label = t3Ang_label\n dep_3Ang_name = fields['deps'][5]\n dep_3Ang_uuid = self.uuid_map[t3Ang_label]\n \n deps = [[dep_1Mag_label, dep_1Mag_name, dep_1Mag_uuid],\n [dep_2Mag_label, dep_2Mag_name, dep_2Mag_uuid],\n [dep_3Mag_label, dep_3Mag_name, dep_3Mag_uuid],\n [dep_1Ang_label, dep_1Ang_name, dep_1Ang_uuid],\n [dep_2Ang_label, dep_2Ang_name, dep_2Ang_uuid],\n [dep_3Ang_label, dep_3Ang_name, dep_3Ang_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}\".format(self.location, self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"SEQ\"\n params = [[param_section_name, param_section_value], [param_name_name, param_name_value]]\n\n outputs = fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map[\"ZER_{0}ANG\".format(t)] = emitted[-9][-36:]\n output_uuid_map[\"ZER_{0}MAG\".format(t)] = emitted[-8][-36:]\n output_uuid_map[\"POS_{0}ANG\".format(t)] = emitted[-7][-36:]\n output_uuid_map[\"POS_{0}MAG\".format(t)] = emitted[-6][-36:]\n output_uuid_map[\"NEG_{0}ANG\".format(t)] = emitted[-5][-36:]\n output_uuid_map[\"NEG_{0}MAG\".format(t)] = emitted[-4][-36:]\n output_uuid_map[\"UNB_{0}NEG\".format(t)] = emitted[-3][-36:]\n output_uuid_map[\"UNB_{0}ZER\".format(t)] = emitted[-2][-36:]\n\n filename = \"{0}/SEQ_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map",
"def test_excludeinggaps(self):\n model = substitution_model.TimeReversibleNucleotide(model_gaps=False)\n assert len(model.get_alphabet()) == 4",
"def test_regular_to_model(self):\n r = RNA.make_seq(\"AAA\", name=\"x\")\n s = RNA.make_array_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def extend_seq(mrnaseq, mrna_frag, total_length=50):\n #\n # Prepare sequences with no gaps\n #\n mrnaseq_nogap = mrnaseq.replace(\"-\", \"\")\n mrna_frag_nogap = mrna_frag.replace(\"-\", \"\")\n #\n # check if the sequence is shorter\n #\n if len(mrna_frag_nogap) > total_length:\n syserr(\"mrnaseq_nogap: \", mrnaseq_nogap)\n syserr(\"mrna_frag_nogap: \", mrna_frag_nogap)\n syserr(\"mrnaseq: \", mrnaseq)\n syserr(\"mrna_frag: \", mrna_frag)\n raise Exception(\n \"Check your sequences maybe you should shrink, not extend them\")\n span = re.search(mrna_frag_nogap, mrnaseq_nogap).span()\n\n # Decide which type of extension to do\n gap_pos_mean = mean([i for i, x in enumerate(mrna_frag) if x == \"-\"])\n list_median = median([i for i in range(len(mrna_frag))])\n\n # this ratio gives us relative position of the gaps\n ratio = gap_pos_mean / list_median\n\n # Based on the ratio do the extension of the sequence\n if ratio > 0.5 and ratio < 1.5: # extend both sides\n li = span[0]\n ui = span[1]\n length = ui - li\n if length > total_length:\n return -1\n elif length == total_length:\n return mrnaseq_nogap[li:ui]\n else:\n dif = total_length - length\n quot = dif // 2 # this is explicit integer division\n l_ext = li - quot # TODO check if they are not lower than 0\n u_ext = ui + (dif - quot)\n if (l_ext < 0) or (u_ext > len(mrnaseq_nogap) - 1):\n return \"NA\"\n else:\n return mrnaseq_nogap[l_ext:u_ext]\n elif ratio <= 0.5: # extend left - it means upstream (5'end)\n li = span[0]\n ui = span[1]\n length = ui - li\n dif = total_length - len(mrna_frag_nogap)\n if (li - dif < 0):\n return mrnaseq_nogap[:ui + abs(li - dif)]\n else:\n return mrnaseq_nogap[li - dif:ui]\n elif ratio >= 1.5: # extend right - it means downstream (3'end)\n li = span[0]\n ui = span[1]\n length = ui - li\n dif = total_length - len(mrna_frag_nogap)\n # if there is noting to extend to the right\n if ui + dif > len(mrnaseq_nogap):\n return mrnaseq_nogap[li - ((ui + dif) - len(mrnaseq_nogap)):]\n else:\n return mrnaseq_nogap[li:ui + dif]",
"def increment(sequence, seq):\n seq.rollover_len = len(sequence) + 1\n seq.value = sequence[::-1]\n return (SeqGen.next(seq)[::-1])",
"def test_check_seqs_added_demultiplex(self):\r\n\r\n # Test added demultiplex for the run_prefix\r\n in_seqs = self.in_seqs_added_demultiplex\r\n bc_map = self.bc_map_added_demultiplex\r\n primer_seq_lens = self.primer_seq_lens_fixed_len_bc1\r\n all_primers = self.all_primers_fixed_len_bc1\r\n expected = self.expected_fasta_fixed_added_demultiplex\r\n\r\n fd, out_fp = mkstemp(prefix=\"sample_seqs_\", suffix=\".fna.tmp\")\r\n close(fd)\r\n out_f = open(out_fp, \"w\")\r\n self._files_to_remove.append(out_f.name.replace('.tmp', ''))\r\n\r\n actual = check_seqs(\r\n fasta_out=out_f,\r\n fasta_files=[in_seqs],\r\n starting_ix=0,\r\n valid_map=bc_map,\r\n qual_mappings={},\r\n filters=[],\r\n barcode_len=12,\r\n keep_primer=False,\r\n keep_barcode=False,\r\n barcode_type=\"golay_12\",\r\n max_bc_errors=1.5,\r\n retain_unassigned_reads=False,\r\n attempt_bc_correction=True,\r\n primer_seqs_lens=primer_seq_lens,\r\n all_primers=all_primers,\r\n max_primer_mm=0,\r\n disable_primer_check=False,\r\n reverse_primers='disable',\r\n rev_primers={},\r\n qual_out=False,\r\n added_demultiplex_field='run_prefix')\r\n\r\n out_f = open(out_f.name.replace('.tmp', ''), \"U\")\r\n actual_results = '\\n'.join([line.strip() for line in out_f])\r\n\r\n self.assertEqual(actual_results, expected)\r\n\r\n # Demultiplex by the 'group' in the fasta label\r\n in_seqs = self.in_seqs_added_demultiplex\r\n bc_map = self.bc_map_added_demultiplex_group\r\n primer_seq_lens = self.primer_seq_lens_fixed_len_bc1\r\n all_primers = self.all_primers_fixed_len_bc1\r\n expected = self.expected_fasta_added_demultiplex_group\r\n\r\n fd, out_fp = mkstemp(prefix=\"sample_seqs_\", suffix=\".fna.tmp\")\r\n close(fd)\r\n out_f = open(out_fp, \"w\")\r\n self._files_to_remove.append(out_f.name.replace('.tmp', ''))\r\n\r\n actual = check_seqs(\r\n fasta_out=out_f,\r\n fasta_files=[in_seqs],\r\n starting_ix=0,\r\n valid_map=bc_map,\r\n qual_mappings={},\r\n filters=[],\r\n barcode_len=12,\r\n keep_primer=False,\r\n keep_barcode=False,\r\n barcode_type=\"golay_12\",\r\n max_bc_errors=1.5,\r\n retain_unassigned_reads=False,\r\n attempt_bc_correction=True,\r\n primer_seqs_lens=primer_seq_lens,\r\n all_primers=all_primers,\r\n max_primer_mm=0,\r\n disable_primer_check=False,\r\n reverse_primers='disable',\r\n rev_primers={},\r\n qual_out=False,\r\n added_demultiplex_field='group')\r\n\r\n out_f = open(out_f.name.replace('.tmp', ''), \"U\")\r\n actual_results = '\\n'.join([line.strip() for line in out_f])\r\n\r\n self.assertEqual(actual_results, expected)",
"def shrink_seq(mrnaseq, mrna_frag, mrna_frag_target, total_length=50):\n # Prepare sequences with no gaps\n mrnaseq_nogap = mrnaseq.replace(\"-\", \"\")\n mrna_frag_nogap = mrna_frag.replace(\"-\", \"\")\n if len(mrna_frag_nogap) < total_length:\n syserr(mrna_frag_nogap)\n syserr(mrnaseq)\n syserr(mrna_frag)\n syserr(mrna_frag_target)\n raise Exception(\n \"Check your sequences maybe you should extend, not shrink them\")\n span = re.search(mrna_frag_nogap, mrnaseq_nogap).span()\n\n # Decide which type of extension to do\n gap_pos_mean = mean(\n [i for i, x in enumerate(mrna_frag_target) if x == \"-\"])\n list_median = median([i for i in range(len(mrna_frag_target))])\n\n # this ratio gives us relative position of the gaps\n ratio = gap_pos_mean / list_median\n\n # Based on the ratio do the shrinkage of the sequence\n if ratio > 0.5 and ratio < 1.5: # extend both sides\n li = span[0]\n ui = span[1]\n length = ui - li\n if length < total_length:\n return -1\n elif length == total_length:\n return mrnaseq_nogap[li:ui]\n else:\n dif = abs(total_length - length)\n quot = dif // 2 # this is explicit integer division\n l_ext = li + quot\n u_ext = ui - (dif - quot)\n if (u_ext < 0) or (u_ext > len(mrnaseq_nogap) - 1):\n return \"NA\"\n else:\n return mrnaseq_nogap[l_ext:u_ext]\n elif ratio <= 0.5: # trim left - it means upstream (5'end)\n li = span[0]\n ui = span[1]\n length = ui - li\n dif = len(mrna_frag_nogap) - total_length\n return mrnaseq_nogap[li + dif:ui]\n elif ratio >= 1.5: # extend right - it means downstream (3'end)\n li = span[0]\n ui = span[1]\n length = ui - li\n dif = len(mrna_frag_nogap) - total_length\n return mrnaseq_nogap[li:ui - dif]",
"def gen_seq(self,ntrials=20,pm_trial_position=None):\n # insert ranomly positioned pm trials\n if type(pm_trial_position)==type(None):\n ntrials -= 1+self.num_pm_trials\n pm_trial_position = np.random.randint(self.min_start_trials,ntrials,self.num_pm_trials) \n else:\n ntrials -= 1+len(pm_trial_position)\n pm_trial_position = pm_trial_position\n # generate og stim\n seq = np.random.randint(0,self.ntokens_og,ntrials)\n X = np.insert(seq,[0,*pm_trial_position],self.pm_token)\n # form Y \n Xroll = np.roll(X,self.nback)\n Y = (X == Xroll).astype(int) # nback trials\n Y[X==self.pm_token]=2 # pm trials\n return X,Y",
"def test_nongaps(self):\n sc = self.SequenceClass\n self.assertEqual(sc(\"TC\").nongaps(), array([1, 1]))\n self.assertEqual(sc(\"T-\").nongaps(), array([1, 0]))",
"def gen_xseq_singletrial(self,min_trial_len=2,max_trial_len=3):\n # random length OG task seq\n trial_len = np.random.randint(min_trial_len,max_trial_len+1)\n task_seq = np.random.randint(0,self.ntokens_og,trial_len)\n # pm stim\n pm_cue_encode_seq = np.arange(self.ntokens_og,self.ntokens_pm+self.ntokens_og)\n np.random.shuffle(pm_cue_encode_seq)\n trial_pm_stim = np.random.choice(pm_cue_encode_seq)\n # concat\n seq = np.concatenate([\n pm_cue_encode_seq,\n task_seq,\n [trial_pm_stim]\n ])\n return seq",
"def _get_sequence(self):\n return self.__sequence",
"def _get_sequence(self):\n return self.__sequence",
"def _get_sequence(self):\n return self.__sequence",
"def _get_sequence(self):\n return self.__sequence"
] | [
"0.61801946",
"0.6072817",
"0.5821261",
"0.57876194",
"0.5663991",
"0.5558589",
"0.55302256",
"0.55173",
"0.5506784",
"0.5503168",
"0.54316753",
"0.53699327",
"0.5357166",
"0.5355198",
"0.53514254",
"0.5329967",
"0.53242975",
"0.5302287",
"0.52581125",
"0.52402294",
"0.52292484",
"0.5223534",
"0.5217455",
"0.5214847",
"0.5210252",
"0.5208791",
"0.52029353",
"0.52029353",
"0.52029353",
"0.52029353"
] | 0.6927464 | 0 |
Regular sequence should convert to model sequence | def test_regular_to_model(self):
r = RNA.make_seq("AAA", name="x")
s = RNA.make_array_seq(r)
self.assertEqual(str(s), "AAA")
self.assertEqual(s.moltype, RNA)
self.assertEqual(s.name, "x") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_model_to_regular(self):\n r = RNA.make_array_seq(\"AAA\", name=\"x\")\n s = RNA.make_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def test_model_to_model(self):\n r = RNA.make_array_seq(\"AAA\", name=\"x\")\n s = RNA.make_array_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def encode(self, seq):",
"def test_godel_number_to_sequence():\n\tassert godel_number_to_sequence(1) == ()\n\tassert godel_number_to_sequence(2) == (1,)\n\tassert godel_number_to_sequence(3) == (0, 1)\n\tassert godel_number_to_sequence(2250) == (1, 2, 3)",
"def test_sequence_to_moltype(self):\n s = Sequence(\"TTTTTTTTTTAAAA\", name=\"test1\")\n annot1 = s.add_annotation(Feature, \"exon\", \"fred\", [(0, 10)])\n annot2 = s.add_annotation(Feature, \"exon\", \"trev\", [(10, 14)])\n got = s.to_moltype(\"rna\")\n annot1_slice = str(annot1.get_slice())\n annot2_slice = str(annot2.get_slice())\n got1_slice = str(got.annotations[0].get_slice())\n got2_slice = str(got.annotations[1].get_slice())\n self.assertNotEqual(annot1_slice, got1_slice)\n self.assertEqual(annot2_slice, got2_slice)\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test1\")\n\n s = Sequence(\"AAGGGGAAAACCCCCAAAAAAAAAATTTTTTTTTTAAA\", name=\"test2\")\n xx_y = [[[2, 6], 2.4], [[10, 15], 5.1], [[25, 35], 1.3]]\n y_valued = s.add_annotation(Variable, \"SNP\", \"freq\", xx_y)\n got = s.to_moltype(\"rna\")\n y_valued_slice = str(y_valued.get_slice())\n got_slice = str(str(got.annotations[0].get_slice()))\n self.assertNotEqual(y_valued_slice, got_slice)\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test2\")\n\n s = Sequence(\"TTTTTTTTTTAAAAAAAAAA\", name=\"test3\")\n data = [i for i in range(20)]\n annot4 = s.add_annotation(SimpleVariable, \"SNP\", \"freq\", data)\n got = s.to_moltype(RNA)\n annot4_slice = str(annot4.get_slice())\n got_slice = str(str(got.annotations[0].get_slice()))\n self.assertNotEqual(annot4_slice[:10], got_slice[:10])\n self.assertEqual(annot4_slice[10:20], got_slice[10:20])\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test3\")\n\n # calling with a null object should raise an exception\n with self.assertRaises(ValueError):\n s.to_moltype(None)\n\n with self.assertRaises(ValueError):\n s.to_moltype(\"\")",
"def test_regular_to_regular(self):\n r = RNA.make_seq(\"AAA\", name=\"x\")\n s = RNA.make_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def preprocess_inde(self, sequence, src_seq):\r\n sequence = sequence + [len(src_seq) - 1] # add sen\r\n sequence = torch.Tensor(sequence)\r\n return sequence",
"def to_seq_record(self):\n\t\t#create the anotations in a pythonic manner\n\t\texempt = ['name', 'description', 'features', 'sequence'] #things which aren't annotations\n\t\tannotations = { }\n\t\tfor key, value in self.__dict__.iteritems():\n\t\t\tif key.lower() not in exempt:\n\t\t\t\tannotations[key] = value\n\t\t\n\t\t#create the features\n\t\tfeatures = []\n\t\tfor feat in self.features:\n\t\t\tfeatures.append( SeqFeature( \n\t\t\t\tlocation = FeatureLocation(feat['startpos'] - 1, feat['endpos']), #NB partsregistry uses 1-offset, and inclusive.\n\t\t\t\ttype = feat['type'],\n\t\t\t\tstrand = feat['strand'],\n\t\t\t\tqualifiers = {'title': feat['name'],}))\n\t\t\n\t\treturn SeqRecord(\tself.sequence, \n\t\t\t\t\t\t\tid=self.name,\n\t\t\t\t\t\t\tname=self.name,\n\t\t\t\t\t\t\tdescription=self.description,\n\t\t\t\t\t\t\tfeatures=features,\n\t\t\t\t\t\t\tannotations=annotations)",
"def test_ModelRnaCodonSequence(self):\n r = ArrayRnaCodonSequence(\"UUUCGU\")\n self.assertEqual(str(r), \"UUUCGU\")\n self.assertEqual(r._data, array([0, 28]))\n self.assertEqual(str(r.to_rna()), \"UUUCGU\")\n self.assertEqual(str(r.to_dna()), \"TTTCGT\")",
"def test_ModelDnaCodonSequence(self):\n d = ArrayDnaCodonSequence(\"UUUCGU\")\n self.assertEqual(str(d), \"TTTCGT\")\n self.assertEqual(d._data, array([0, 28]))\n self.assertEqual(str(d.to_rna()), \"UUUCGU\")\n self.assertEqual(str(d.to_dna()), \"TTTCGT\")",
"def pack(cls, seq):\n return cls.sequence(filter(_is_not_nothing, seq))",
"def convert2seq(self,seq_int):\n\t\treturn [self.aminoacids[i] for i in seq_int]",
"def seq(self): # (pure virtual)\n raise NotImplementedError",
"def model_to_sequencesql(self, m):\n from django.db import connection\n\n # tbl has app_label prefix; e.g., testapp_simple\n tbl = m._meta.db_table\n\n # Get name of sequence for this table. Here's\n # a trace from doing it manually.\n #\n # sql> select \"default\" from sys.columns\n # more> where table_id = 4186 and name = 'id';\n # +-------------------------------------+\n # | default |\n # +=====================================+\n # | next value for \"django1\".\"seq_4176\" |\n # +-------------------------------------+\n # 1 tuple\n # sql>\n #\n\n c = connection.cursor()\n fmt = \"\"\"\nSELECT\n \"default\"\nFROM\n sys.columns\nWHERE\n table_id = (SELECT id FROM sys.tables where name = %s) AND\n name = 'id'\n;\n\"\"\"\n c.execute(fmt, [tbl, ])\n row = c.fetchone()\n # default = 'next value for \"django1\".\"seq_4176\"'\n default = row[0]\n p = default.rfind('\".\"seq_')\n if p == -1:\n return ''\n\n # seq = '\"seq_4176\"'\n seq = default[p + 2:]\n\n fmt = 'ALTER SEQUENCE %s RESTART WITH (SELECT MAX(id) + 1 FROM %s);'\n\n return fmt % (seq, tbl)",
"def _process_action_seq(sequence, length=15, new_model=True):\n sequence = sequence[-length:].copy()\n if new_model:\n sequence = sequence[::-1]\n if len(sequence) < length:\n empty_sequence = [[] for _ in range(length - len(sequence))]\n empty_sequence.extend(sequence)\n sequence = empty_sequence\n return sequence",
"def TestSeq2Seq(source_seq, target_seq_in, target_seq_out): \r\n loss = 0\r\n pred = []\r\n decoder_length = target_seq_out.shape[1]\r\n # Encode the source.\r\n encoder_outputs = encoder(source_seq)\r\n states = encoder_outputs[1:]\r\n # Decoder predicts the target_seq.\r\n decoder_in = tf.expand_dims(target_seq_in[:, 0], 1)\r\n for t in range(decoder_length):\r\n logit, de_state_h, de_state_c= decoder(decoder_in, states)\r\n decoder_in = tf.expand_dims(logit, 1)\r\n states = de_state_h, de_state_c\r\n # loss function : RSME TODO\r\n loss_0 = tf.keras.losses.MSE(target_seq_out[:, t, 1:3], logit[:, 1:3])\r\n loss += tf.sqrt(loss_0)# TODO\r\n \r\n loss = tf.reduce_mean(loss) \r\n loss = loss / decoder_length\r\n return loss",
"def convert_to_model(self, *args):",
"def _check_sequence(self) -> PossibleResult[T]:\n if isinstance(self.constructor_origin, type) and issubclass(\n self.constructor_origin, Sequence\n ):\n if not isinstance(self.obj, Sequence):\n raise DeserializeError(\n Sequence, self.obj, self.new_depth, self.key\n )\n if self.constructor_args:\n _arg = self.constructor_args[0]\n else:\n _arg = Any # type: ignore\n return self.constructor_origin(\n Deserialize(\n obj=value,\n constructor=_arg,\n depth=self.new_depth,\n convert_primitives=self.convert_primitives,\n ).run()\n for value in self.obj\n ) # type: ignore\n return NO_RESULT",
"def get_seq(self): # -> list[Unknown]:\n ...",
"def test_to_phylip(self):\n s = self.SequenceClass(\"ACG\", name=\"xyz\")\n self.assertEqual(s.to_phylip(), \"xyz\" + \" \" * 27 + \"ACG\")",
"def TestAttentionSeq2Seq(source_seq, target_seq_in, target_seq_out): \r\n loss = 0\r\n decoder_length = target_seq_out.shape[1]\r\n # Encode the source.\r\n encoder_outputs = encoder_a(source_seq)\r\n states = encoder_outputs[1:]\r\n history = encoder_outputs[0]\r\n # Decoder predicts the target_seq.\r\n decoder_in = tf.expand_dims(target_seq_in[:, 0], 1)\r\n for t in range(decoder_length):\r\n logit, lstm_out, de_state_h, de_state_c, _= decoder_a(decoder_in, states, history)\r\n decoder_in = tf.expand_dims(logit, 1)\r\n history_new = tf.expand_dims(lstm_out, 1)\r\n history = tf.concat([history[:, 1:], history_new], 1)\r\n states = de_state_h, de_state_c\r\n # loss function : RSME \r\n loss_0 = tf.keras.losses.MSE(target_seq_out[:, t, 1:3], logit[:, 1:3])\r\n loss += tf.sqrt(loss_0)\r\n \r\n loss = tf.reduce_mean(loss) \r\n loss = loss / decoder_length\r\n return loss",
"def sequence_params(self):",
"def transform(self, x): # takes no other parameters (use fields initialized in constructor instead).\n if self.do_clean:\n x = self.clean(x)\n if self.tokenizer is None:\n raise ValueError('Tokenizer has not been initialized.')\n # other transforming to produce tensor for input layer of model\n x = self.tokenizer.texts_to_sequences(x)\n return pad_sequences(x, maxlen=self.max_sequence_length, padding=self.pad_type, truncating=self.trunc_type,\n value=0)",
"def predict(self, seq):\n raise Exception(\"You cannot predict with a base predictor.\")",
"def translate(self) -> Seq:\n AA = \"\".join(\n self.codons[self.sequence[i : i + 3]]\n for i in range(0, len(self.sequence), 3)\n if self.codons[self.sequence[i : i + 3]] != \"Stop\"\n )\n return Seq(AA, self.id)",
"def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))",
"def MakeSeq(self,content):\n return self.register(Seq(content,reg=self))",
"def testSeqDefaults(self):\n self.assertEqual(\n None,\n self.mr._is_seq\n )\n\n self.assertEqual(\n None,\n self.mr._sequences\n )",
"def sequence(self) -> Any:\n return self.__seq",
"def sequence(seq, limits=None):\n seq = sympify(seq)\n\n if is_sequence(seq, Tuple):\n return SeqPer(seq, limits)\n else:\n return SeqFormula(seq, limits)"
] | [
"0.732975",
"0.70638686",
"0.6560123",
"0.65202504",
"0.628081",
"0.6274271",
"0.62084633",
"0.617465",
"0.59855276",
"0.58972305",
"0.5821316",
"0.58064896",
"0.57611835",
"0.5760904",
"0.57509965",
"0.5748884",
"0.5734916",
"0.5701258",
"0.56332576",
"0.5625062",
"0.5587976",
"0.5579556",
"0.5570987",
"0.5555264",
"0.5533339",
"0.5521596",
"0.55116236",
"0.54978496",
"0.5480397",
"0.54741937"
] | 0.74114156 | 0 |
Model sequence should convert to regular sequence | def test_model_to_regular(self):
r = RNA.make_array_seq("AAA", name="x")
s = RNA.make_seq(r)
self.assertEqual(str(s), "AAA")
self.assertEqual(s.moltype, RNA)
self.assertEqual(s.name, "x") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_regular_to_model(self):\n r = RNA.make_seq(\"AAA\", name=\"x\")\n s = RNA.make_array_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def test_model_to_model(self):\n r = RNA.make_array_seq(\"AAA\", name=\"x\")\n s = RNA.make_array_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def encode(self, seq):",
"def preprocess_inde(self, sequence, src_seq):\r\n sequence = sequence + [len(src_seq) - 1] # add sen\r\n sequence = torch.Tensor(sequence)\r\n return sequence",
"def test_godel_number_to_sequence():\n\tassert godel_number_to_sequence(1) == ()\n\tassert godel_number_to_sequence(2) == (1,)\n\tassert godel_number_to_sequence(3) == (0, 1)\n\tassert godel_number_to_sequence(2250) == (1, 2, 3)",
"def model_to_sequencesql(self, m):\n from django.db import connection\n\n # tbl has app_label prefix; e.g., testapp_simple\n tbl = m._meta.db_table\n\n # Get name of sequence for this table. Here's\n # a trace from doing it manually.\n #\n # sql> select \"default\" from sys.columns\n # more> where table_id = 4186 and name = 'id';\n # +-------------------------------------+\n # | default |\n # +=====================================+\n # | next value for \"django1\".\"seq_4176\" |\n # +-------------------------------------+\n # 1 tuple\n # sql>\n #\n\n c = connection.cursor()\n fmt = \"\"\"\nSELECT\n \"default\"\nFROM\n sys.columns\nWHERE\n table_id = (SELECT id FROM sys.tables where name = %s) AND\n name = 'id'\n;\n\"\"\"\n c.execute(fmt, [tbl, ])\n row = c.fetchone()\n # default = 'next value for \"django1\".\"seq_4176\"'\n default = row[0]\n p = default.rfind('\".\"seq_')\n if p == -1:\n return ''\n\n # seq = '\"seq_4176\"'\n seq = default[p + 2:]\n\n fmt = 'ALTER SEQUENCE %s RESTART WITH (SELECT MAX(id) + 1 FROM %s);'\n\n return fmt % (seq, tbl)",
"def sequence_params(self):",
"def to_seq_record(self):\n\t\t#create the anotations in a pythonic manner\n\t\texempt = ['name', 'description', 'features', 'sequence'] #things which aren't annotations\n\t\tannotations = { }\n\t\tfor key, value in self.__dict__.iteritems():\n\t\t\tif key.lower() not in exempt:\n\t\t\t\tannotations[key] = value\n\t\t\n\t\t#create the features\n\t\tfeatures = []\n\t\tfor feat in self.features:\n\t\t\tfeatures.append( SeqFeature( \n\t\t\t\tlocation = FeatureLocation(feat['startpos'] - 1, feat['endpos']), #NB partsregistry uses 1-offset, and inclusive.\n\t\t\t\ttype = feat['type'],\n\t\t\t\tstrand = feat['strand'],\n\t\t\t\tqualifiers = {'title': feat['name'],}))\n\t\t\n\t\treturn SeqRecord(\tself.sequence, \n\t\t\t\t\t\t\tid=self.name,\n\t\t\t\t\t\t\tname=self.name,\n\t\t\t\t\t\t\tdescription=self.description,\n\t\t\t\t\t\t\tfeatures=features,\n\t\t\t\t\t\t\tannotations=annotations)",
"def convert2seq(self,seq_int):\n\t\treturn [self.aminoacids[i] for i in seq_int]",
"def test_sequence_to_moltype(self):\n s = Sequence(\"TTTTTTTTTTAAAA\", name=\"test1\")\n annot1 = s.add_annotation(Feature, \"exon\", \"fred\", [(0, 10)])\n annot2 = s.add_annotation(Feature, \"exon\", \"trev\", [(10, 14)])\n got = s.to_moltype(\"rna\")\n annot1_slice = str(annot1.get_slice())\n annot2_slice = str(annot2.get_slice())\n got1_slice = str(got.annotations[0].get_slice())\n got2_slice = str(got.annotations[1].get_slice())\n self.assertNotEqual(annot1_slice, got1_slice)\n self.assertEqual(annot2_slice, got2_slice)\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test1\")\n\n s = Sequence(\"AAGGGGAAAACCCCCAAAAAAAAAATTTTTTTTTTAAA\", name=\"test2\")\n xx_y = [[[2, 6], 2.4], [[10, 15], 5.1], [[25, 35], 1.3]]\n y_valued = s.add_annotation(Variable, \"SNP\", \"freq\", xx_y)\n got = s.to_moltype(\"rna\")\n y_valued_slice = str(y_valued.get_slice())\n got_slice = str(str(got.annotations[0].get_slice()))\n self.assertNotEqual(y_valued_slice, got_slice)\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test2\")\n\n s = Sequence(\"TTTTTTTTTTAAAAAAAAAA\", name=\"test3\")\n data = [i for i in range(20)]\n annot4 = s.add_annotation(SimpleVariable, \"SNP\", \"freq\", data)\n got = s.to_moltype(RNA)\n annot4_slice = str(annot4.get_slice())\n got_slice = str(str(got.annotations[0].get_slice()))\n self.assertNotEqual(annot4_slice[:10], got_slice[:10])\n self.assertEqual(annot4_slice[10:20], got_slice[10:20])\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test3\")\n\n # calling with a null object should raise an exception\n with self.assertRaises(ValueError):\n s.to_moltype(None)\n\n with self.assertRaises(ValueError):\n s.to_moltype(\"\")",
"def _process_action_seq(sequence, length=15, new_model=True):\n sequence = sequence[-length:].copy()\n if new_model:\n sequence = sequence[::-1]\n if len(sequence) < length:\n empty_sequence = [[] for _ in range(length - len(sequence))]\n empty_sequence.extend(sequence)\n sequence = empty_sequence\n return sequence",
"def seq(self): # (pure virtual)\n raise NotImplementedError",
"def get_seq(self): # -> list[Unknown]:\n ...",
"def sequence(self) -> Any:\n return self.__seq",
"def generate_sequence(scaler, model, x_sample, future=100):\r\n y_pred_tensor = model(x_sample, future=future)\r\n y_pred = y_pred_tensor.cpu().tolist()\r\n y_pred = scaler.inverse_transform(y_pred)\r\n return y_pred",
"def convert_to_model(self, *args):",
"def _get_sequence(self):\n return self.__sequence",
"def _get_sequence(self):\n return self.__sequence",
"def _get_sequence(self):\n return self.__sequence",
"def _get_sequence(self):\n return self.__sequence",
"def _get_sequence(self):\n return self.__sequence",
"def _get_sequence(self):\n return self.__sequence",
"def _get_sequence(self):\n return self.__sequence",
"def _get_sequence(self):\n return self.__sequence",
"def _get_sequence(self):\n return self.__sequence",
"def _get_sequence(self):\n return self.__sequence",
"def _get_sequence(self):\n return self.__sequence",
"def _get_sequence(self):\n return self.__sequence",
"def _get_sequence(self):\n return self.__sequence",
"def _get_sequence(self):\n return self.__sequence"
] | [
"0.6850945",
"0.66729575",
"0.6485937",
"0.6349795",
"0.6317251",
"0.61167735",
"0.60555464",
"0.6052192",
"0.5994143",
"0.5922497",
"0.59025365",
"0.58883065",
"0.58029574",
"0.5746057",
"0.5729786",
"0.57184625",
"0.5694364",
"0.5694364",
"0.5694364",
"0.5694364",
"0.5694364",
"0.5694364",
"0.5694364",
"0.5694364",
"0.5694364",
"0.5694364",
"0.5694364",
"0.5694364",
"0.5694364",
"0.5694364"
] | 0.6829134 | 1 |
Regular sequence should convert to regular sequence | def test_regular_to_regular(self):
r = RNA.make_seq("AAA", name="x")
s = RNA.make_seq(r)
self.assertEqual(str(s), "AAA")
self.assertEqual(s.moltype, RNA)
self.assertEqual(s.name, "x") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def encode(self, seq):",
"def test_model_to_regular(self):\n r = RNA.make_array_seq(\"AAA\", name=\"x\")\n s = RNA.make_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def test_regular_to_model(self):\n r = RNA.make_seq(\"AAA\", name=\"x\")\n s = RNA.make_array_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def transform_seq(seq):\n # TODO add character checking based on ASCII code\n return \"\".join(\"\" if aa in msa_characters else aa for aa in seq)",
"def test_godel_number_to_sequence():\n\tassert godel_number_to_sequence(1) == ()\n\tassert godel_number_to_sequence(2) == (1,)\n\tassert godel_number_to_sequence(3) == (0, 1)\n\tassert godel_number_to_sequence(2250) == (1, 2, 3)",
"def preprocess_inde(self, sequence, src_seq):\r\n sequence = sequence + [len(src_seq) - 1] # add sen\r\n sequence = torch.Tensor(sequence)\r\n return sequence",
"def pack(cls, seq):\n return cls.sequence(filter(_is_not_nothing, seq))",
"def prepseq(self, seq):\n\n wtf = re.sub(r'\\*$', '', seq)\n return wtf",
"def convert2seq(self,seq_int):\n\t\treturn [self.aminoacids[i] for i in seq_int]",
"def to_rna(seq):\n seq=seq.replace('A','U')\n seq=seq.replace('T','A')\n seq=seq.replace('C',\"P\")\n seq=seq.replace('G','C')\n seq=seq.replace('P','G')\n return seq",
"def simple_seq(seq):\n for i in seq:\n yield i",
"def test_sequence_to_moltype(self):\n s = Sequence(\"TTTTTTTTTTAAAA\", name=\"test1\")\n annot1 = s.add_annotation(Feature, \"exon\", \"fred\", [(0, 10)])\n annot2 = s.add_annotation(Feature, \"exon\", \"trev\", [(10, 14)])\n got = s.to_moltype(\"rna\")\n annot1_slice = str(annot1.get_slice())\n annot2_slice = str(annot2.get_slice())\n got1_slice = str(got.annotations[0].get_slice())\n got2_slice = str(got.annotations[1].get_slice())\n self.assertNotEqual(annot1_slice, got1_slice)\n self.assertEqual(annot2_slice, got2_slice)\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test1\")\n\n s = Sequence(\"AAGGGGAAAACCCCCAAAAAAAAAATTTTTTTTTTAAA\", name=\"test2\")\n xx_y = [[[2, 6], 2.4], [[10, 15], 5.1], [[25, 35], 1.3]]\n y_valued = s.add_annotation(Variable, \"SNP\", \"freq\", xx_y)\n got = s.to_moltype(\"rna\")\n y_valued_slice = str(y_valued.get_slice())\n got_slice = str(str(got.annotations[0].get_slice()))\n self.assertNotEqual(y_valued_slice, got_slice)\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test2\")\n\n s = Sequence(\"TTTTTTTTTTAAAAAAAAAA\", name=\"test3\")\n data = [i for i in range(20)]\n annot4 = s.add_annotation(SimpleVariable, \"SNP\", \"freq\", data)\n got = s.to_moltype(RNA)\n annot4_slice = str(annot4.get_slice())\n got_slice = str(str(got.annotations[0].get_slice()))\n self.assertNotEqual(annot4_slice[:10], got_slice[:10])\n self.assertEqual(annot4_slice[10:20], got_slice[10:20])\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test3\")\n\n # calling with a null object should raise an exception\n with self.assertRaises(ValueError):\n s.to_moltype(None)\n\n with self.assertRaises(ValueError):\n s.to_moltype(\"\")",
"def _is_proper_sequence(seq):\n return (isinstance(seq, collections.abc.Sequence) and\n not isinstance(seq, str))",
"def fix_seq(self, fixed_seq):\n self.wc.fix_seq(wc(fixed_seq))",
"def uninferable(seq):\n return reversed(seq)",
"def decode_sequence(self, sequence=list) -> str:\n try:\n out = []\n for word in sequence:\n out.append(self.decode(word))\n return(out)\n except Exception as error:\n print(f\"Error: self.decode_sequence({sequence}) -> {error}\")",
"def normalization(seq):\n\t\tnew_seq = [6.3578286171 * x for x in seq]\n\t\treturn new_seq",
"def transcribe(self):\n self.sequence = self.sequence.replace(\"T\",\"U\")\n return",
"def _process_action_seq(sequence, length=15, new_model=True):\n sequence = sequence[-length:].copy()\n if new_model:\n sequence = sequence[::-1]\n if len(sequence) < length:\n empty_sequence = [[] for _ in range(length - len(sequence))]\n empty_sequence.extend(sequence)\n sequence = empty_sequence\n return sequence",
"def translate(self) -> Seq:\n AA = \"\".join(\n self.codons[self.sequence[i : i + 3]]\n for i in range(0, len(self.sequence), 3)\n if self.codons[self.sequence[i : i + 3]] != \"Stop\"\n )\n return Seq(AA, self.id)",
"def transformation_seq(self, sequence: str):\n\n # Add '$' after the sequence\n seq = sequence.upper() + \"$\"\n\n # Initialization of the square matrix of all the offsets of the sequence\n seq_matrix = [seq]\n\n previous_seq = seq\n\n # Filling of the square matrix\n for i in range(0, len(seq)-1, 1):\n next_seq = previous_seq[len(seq)-1] + previous_seq[0:len(seq)-1]\n # Complete list for print step by step\n self.list_step_trans_seq.append(next_seq)\n seq_matrix.append(next_seq)\n previous_seq = next_seq\n\n # Sorting the square matrix and display\n self.sort_and_print_matrix(seq_matrix, self.list_el_matrix_final_trans)\n\n # Recovering the last character of each line\n bwt = \"\"\n\n for line in seq_matrix:\n bwt += line[len(line)-1]\n\n self.save(bwt)\n\n return bwt",
"def squeeze_seq(seq):\r\n\r\n return sub(r'([AGCTacgt])\\1+', '\\\\1', seq)",
"def test_convert_input(self):\n m, seq = DNA.make_seq(\"ACGGT--A\").parse_out_gaps()\n aligned_seq = Aligned(m, seq)\n mapped_gap, new_seq = _convert_input(aligned_seq, None)\n self.assertIs(new_seq.moltype, DNA)\n self.assertIs(mapped_gap, m)\n self.assertIs(new_seq, seq)\n mapped_gap, new_seq = _convert_input(\"ACGGT--A\", DNA)\n self.assertEqual(str(mapped_gap), str(m))\n self.assertEqual(str(new_seq), str(seq))",
"def invert_zero_one(sequence):\n return [1 - code for code in sequence]",
"def test_translate(self):\n \n orf = 'ATGTGGAGACGGAAACATCCGAGGACATCCGGAGGAACCCGGGGAGTTCTGAGTGGTAATTAG'\n expected_primers = Seq('MWRRKHPRTSGGTRGVLSGN*', HasStopCodon(IUPAC.ExtendedIUPACProtein(), '*'))\n result_primers = translate(orf)\n self.assertEqual(result_primers, expected_primers)\n self.assertEqual(len(result_primers), 21)\n self.assertEqual(isinstance(result_primers, Seq), True)",
"def test_squeeze_seq(self):\r\n\r\n seq = \"AAAGGGAAACCCGGGA\"\r\n self.assertEqual(squeeze_seq(seq), \"AGACGA\")\r\n self.assertEqual(squeeze_seq(\"AAAATATTTAGGC\"), \"ATATAGC\")\r\n self.assertEqual(squeeze_seq(\"\"), \"\")\r\n self.assertEqual(squeeze_seq(\"ATGCATGCATGC\"), \"ATGCATGCATGC\")",
"def backtranslate(p_seq, n_seq):\r\n # Keep track of the new sequence. Also keep track of which codon we are\r\n # actually processing (gaps don't count)\r\n newseq = ''\r\n codon = 0\r\n for aa in p_seq:\r\n if aa == '-':\r\n newseq += '---'\r\n else:\r\n newseq += n_seq[codon*3:(codon*3) + 3]\r\n codon += 1\r\n return newseq",
"def create_seqeunce_helper(i, text, dsl, char_count, char_to_n, extra, length = seq_length):\n\n seq_int = [] # Sequence mapped to integers\n output_seq = np.zeros((length, char_count)) # Output sequence which will become one item in input array \n\n # Get the next sequence and map its characters to integers\n for v in text[i * length + extra : (i + 1) * length + extra]:\n # If the seed_text is missing a character we append 0\n if v in char_to_n:\n seq_int.append(char_to_n[v])\n else:\n seq_int.append(0)\n\n # For character in sequence\n for j in range(length):\n # Set column corrpsonding to that character to 1\n output_seq[j][seq_int[j]] = 1.0 \n\n return output_seq",
"def test_to_phylip(self):\n s = self.SequenceClass(\"ACG\", name=\"xyz\")\n self.assertEqual(s.to_phylip(), \"xyz\" + \" \" * 27 + \"ACG\")",
"def test_ModelRnaCodonSequence(self):\n r = ArrayRnaCodonSequence(\"UUUCGU\")\n self.assertEqual(str(r), \"UUUCGU\")\n self.assertEqual(r._data, array([0, 28]))\n self.assertEqual(str(r.to_rna()), \"UUUCGU\")\n self.assertEqual(str(r.to_dna()), \"TTTCGT\")"
] | [
"0.6617936",
"0.6383945",
"0.6319407",
"0.62932134",
"0.62242186",
"0.6218362",
"0.61613417",
"0.61074924",
"0.60580015",
"0.5998588",
"0.5887306",
"0.586777",
"0.58568335",
"0.5827881",
"0.57606894",
"0.5693285",
"0.56801903",
"0.56508076",
"0.5638858",
"0.56294554",
"0.56237996",
"0.5565943",
"0.5562769",
"0.55606204",
"0.5543088",
"0.5536609",
"0.55262846",
"0.5520743",
"0.551549",
"0.5509812"
] | 0.6652918 | 0 |
Model sequence should convert to model sequence | def test_model_to_model(self):
r = RNA.make_array_seq("AAA", name="x")
s = RNA.make_array_seq(r)
self.assertEqual(str(s), "AAA")
self.assertEqual(s.moltype, RNA)
self.assertEqual(s.name, "x") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_regular_to_model(self):\n r = RNA.make_seq(\"AAA\", name=\"x\")\n s = RNA.make_array_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def test_model_to_regular(self):\n r = RNA.make_array_seq(\"AAA\", name=\"x\")\n s = RNA.make_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def convert_to_model(self, *args):",
"def encode(self, seq):",
"def to_seq_record(self):\n\t\t#create the anotations in a pythonic manner\n\t\texempt = ['name', 'description', 'features', 'sequence'] #things which aren't annotations\n\t\tannotations = { }\n\t\tfor key, value in self.__dict__.iteritems():\n\t\t\tif key.lower() not in exempt:\n\t\t\t\tannotations[key] = value\n\t\t\n\t\t#create the features\n\t\tfeatures = []\n\t\tfor feat in self.features:\n\t\t\tfeatures.append( SeqFeature( \n\t\t\t\tlocation = FeatureLocation(feat['startpos'] - 1, feat['endpos']), #NB partsregistry uses 1-offset, and inclusive.\n\t\t\t\ttype = feat['type'],\n\t\t\t\tstrand = feat['strand'],\n\t\t\t\tqualifiers = {'title': feat['name'],}))\n\t\t\n\t\treturn SeqRecord(\tself.sequence, \n\t\t\t\t\t\t\tid=self.name,\n\t\t\t\t\t\t\tname=self.name,\n\t\t\t\t\t\t\tdescription=self.description,\n\t\t\t\t\t\t\tfeatures=features,\n\t\t\t\t\t\t\tannotations=annotations)",
"def test_godel_number_to_sequence():\n\tassert godel_number_to_sequence(1) == ()\n\tassert godel_number_to_sequence(2) == (1,)\n\tassert godel_number_to_sequence(3) == (0, 1)\n\tassert godel_number_to_sequence(2250) == (1, 2, 3)",
"def sequence_params(self):",
"def model_to_sequencesql(self, m):\n from django.db import connection\n\n # tbl has app_label prefix; e.g., testapp_simple\n tbl = m._meta.db_table\n\n # Get name of sequence for this table. Here's\n # a trace from doing it manually.\n #\n # sql> select \"default\" from sys.columns\n # more> where table_id = 4186 and name = 'id';\n # +-------------------------------------+\n # | default |\n # +=====================================+\n # | next value for \"django1\".\"seq_4176\" |\n # +-------------------------------------+\n # 1 tuple\n # sql>\n #\n\n c = connection.cursor()\n fmt = \"\"\"\nSELECT\n \"default\"\nFROM\n sys.columns\nWHERE\n table_id = (SELECT id FROM sys.tables where name = %s) AND\n name = 'id'\n;\n\"\"\"\n c.execute(fmt, [tbl, ])\n row = c.fetchone()\n # default = 'next value for \"django1\".\"seq_4176\"'\n default = row[0]\n p = default.rfind('\".\"seq_')\n if p == -1:\n return ''\n\n # seq = '\"seq_4176\"'\n seq = default[p + 2:]\n\n fmt = 'ALTER SEQUENCE %s RESTART WITH (SELECT MAX(id) + 1 FROM %s);'\n\n return fmt % (seq, tbl)",
"def test_sequence_to_moltype(self):\n s = Sequence(\"TTTTTTTTTTAAAA\", name=\"test1\")\n annot1 = s.add_annotation(Feature, \"exon\", \"fred\", [(0, 10)])\n annot2 = s.add_annotation(Feature, \"exon\", \"trev\", [(10, 14)])\n got = s.to_moltype(\"rna\")\n annot1_slice = str(annot1.get_slice())\n annot2_slice = str(annot2.get_slice())\n got1_slice = str(got.annotations[0].get_slice())\n got2_slice = str(got.annotations[1].get_slice())\n self.assertNotEqual(annot1_slice, got1_slice)\n self.assertEqual(annot2_slice, got2_slice)\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test1\")\n\n s = Sequence(\"AAGGGGAAAACCCCCAAAAAAAAAATTTTTTTTTTAAA\", name=\"test2\")\n xx_y = [[[2, 6], 2.4], [[10, 15], 5.1], [[25, 35], 1.3]]\n y_valued = s.add_annotation(Variable, \"SNP\", \"freq\", xx_y)\n got = s.to_moltype(\"rna\")\n y_valued_slice = str(y_valued.get_slice())\n got_slice = str(str(got.annotations[0].get_slice()))\n self.assertNotEqual(y_valued_slice, got_slice)\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test2\")\n\n s = Sequence(\"TTTTTTTTTTAAAAAAAAAA\", name=\"test3\")\n data = [i for i in range(20)]\n annot4 = s.add_annotation(SimpleVariable, \"SNP\", \"freq\", data)\n got = s.to_moltype(RNA)\n annot4_slice = str(annot4.get_slice())\n got_slice = str(str(got.annotations[0].get_slice()))\n self.assertNotEqual(annot4_slice[:10], got_slice[:10])\n self.assertEqual(annot4_slice[10:20], got_slice[10:20])\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test3\")\n\n # calling with a null object should raise an exception\n with self.assertRaises(ValueError):\n s.to_moltype(None)\n\n with self.assertRaises(ValueError):\n s.to_moltype(\"\")",
"def reconstruct_input_ext(self, model_in):",
"def preprocess_inde(self, sequence, src_seq):\r\n sequence = sequence + [len(src_seq) - 1] # add sen\r\n sequence = torch.Tensor(sequence)\r\n return sequence",
"def sequential_model():\n model = build_models()\n seq_model = Sequential(model[0]['layers'], name=model[0]['name'])\n return seq_model",
"def s2s_model(self):\r\n \r\n model = AttentionSeq2Seq(input_dim=self.input_dim, input_length=self.input_len, \r\n hidden_dim=16, output_length=self.output_len, \r\n output_dim=self.output_dim, depth=(1,1),\r\n stateful=False, dropout=0.5)\r\n model.compile(loss='mape', optimizer='adam', metrics=['mse'])\r\n model.fit(self.train_X, self.train_Y, epochs=75, verbose=2, shuffle=True)\r\n\r\n return model",
"def transform(self):",
"def get_seq(self): # -> list[Unknown]:\n ...",
"def _sequences_to_new_records(sequences):\n if isinstance(sequences, dict):\n sequences = list(sequences.items())\n records = []\n for seq in sequences:\n if hasattr(seq, \"id\"):\n records.append(deepcopy(seq))\n else:\n name, seq = seq\n records.append(\n sequence_to_biopython_record(seq, id=name, name=name)\n )\n return records",
"def generate_sequence(scaler, model, x_sample, future=100):\r\n y_pred_tensor = model(x_sample, future=future)\r\n y_pred = y_pred_tensor.cpu().tolist()\r\n y_pred = scaler.inverse_transform(y_pred)\r\n return y_pred",
"def seq(self): # (pure virtual)\n raise NotImplementedError",
"def _process_action_seq(sequence, length=15, new_model=True):\n sequence = sequence[-length:].copy()\n if new_model:\n sequence = sequence[::-1]\n if len(sequence) < length:\n empty_sequence = [[] for _ in range(length - len(sequence))]\n empty_sequence.extend(sequence)\n sequence = empty_sequence\n return sequence",
"def convert2seq(self,seq_int):\n\t\treturn [self.aminoacids[i] for i in seq_int]",
"def __init__(self):\n\n self.sequence = []",
"def instance_to_model(self):\n pass",
"def TestSeq2Seq(source_seq, target_seq_in, target_seq_out): \r\n loss = 0\r\n pred = []\r\n decoder_length = target_seq_out.shape[1]\r\n # Encode the source.\r\n encoder_outputs = encoder(source_seq)\r\n states = encoder_outputs[1:]\r\n # Decoder predicts the target_seq.\r\n decoder_in = tf.expand_dims(target_seq_in[:, 0], 1)\r\n for t in range(decoder_length):\r\n logit, de_state_h, de_state_c= decoder(decoder_in, states)\r\n decoder_in = tf.expand_dims(logit, 1)\r\n states = de_state_h, de_state_c\r\n # loss function : RSME TODO\r\n loss_0 = tf.keras.losses.MSE(target_seq_out[:, t, 1:3], logit[:, 1:3])\r\n loss += tf.sqrt(loss_0)# TODO\r\n \r\n loss = tf.reduce_mean(loss) \r\n loss = loss / decoder_length\r\n return loss",
"def sequence_reset_sql(self, style, model_list):\n results = []\n for model in model_list:\n sql = self.model_to_sequencesql(model)\n if sql:\n results.append(sql)\n return results",
"def sequence(self) -> Any:\n return self.__seq",
"def _get_sequence(self):\n return self.__sequence",
"def _get_sequence(self):\n return self.__sequence",
"def _get_sequence(self):\n return self.__sequence",
"def _get_sequence(self):\n return self.__sequence",
"def _get_sequence(self):\n return self.__sequence"
] | [
"0.6928161",
"0.6812833",
"0.65000093",
"0.60420865",
"0.60281956",
"0.59811866",
"0.5976736",
"0.58936715",
"0.5847981",
"0.5823246",
"0.5763527",
"0.57178855",
"0.5535258",
"0.55170923",
"0.54954267",
"0.548045",
"0.5480217",
"0.5442175",
"0.54339",
"0.54253",
"0.54149",
"0.5399543",
"0.5396384",
"0.53947765",
"0.536948",
"0.5358486",
"0.5358486",
"0.5358486",
"0.5358486",
"0.5358486"
] | 0.7089526 | 0 |
ArrayDnaCodonSequence should behave as expected | def test_ModelDnaCodonSequence(self):
d = ArrayDnaCodonSequence("UUUCGU")
self.assertEqual(str(d), "TTTCGT")
self.assertEqual(d._data, array([0, 28]))
self.assertEqual(str(d.to_rna()), "UUUCGU")
self.assertEqual(str(d.to_dna()), "TTTCGT") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_ModelRnaCodonSequence(self):\n r = ArrayRnaCodonSequence(\"UUUCGU\")\n self.assertEqual(str(r), \"UUUCGU\")\n self.assertEqual(r._data, array([0, 28]))\n self.assertEqual(str(r.to_rna()), \"UUUCGU\")\n self.assertEqual(str(r.to_dna()), \"TTTCGT\")",
"def initialize_dna(self):\n return np.random.rand(1, self.n_genes) * 2 - 1",
"def aa_generator_DNA(dnaseq):\n return (translate_DNA_codon(dnaseq[n:n+3])\n for n in range(0, len(dnaseq), 3))",
"def __init__(self):\n self.dna = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]",
"def test_fasta_get_codons_from_seq(self):\r\n\r\n codon_list = mfau.get_codons_from_sequence(\"AGGTGACACCGCAAGCCTTATATTAGC\")\r\n\r\n if debug:\r\n for codon in codon_list:\r\n print codon\r\n\r\n self.assertGreaterEqual(len(codon_list), 0)",
"def test_correct_sequence_list(self):\n pairs = [\n self._create_codon_pair(),\n self._create_codon_pair(),\n self._create_codon_pair(),\n self._create_codon_pair(),\n self._create_codon_pair(),\n self._create_codon_pair(),\n ]\n\n sequences = [p.sequence for p in pairs]\n\n dna = DNA()\n [dna.append(p) for p in pairs]\n\n # Sequence must match\n self.assertEqual(dna.to_sequence_list(), sequences)",
"def translate_DNA(dnaseq):\n\n gen = aa_generator_DNA(dnaseq)\n seq = ''\n aa = next(gen, None)\n while aa:\n seq += aa\n aa = next(gen, None)\n return seq",
"def coding_strand_to_AA(dna):\n amino_acid=\"\"\n for i in range(0, len(dna), 3):\n mycodon=dna[i:i+3]\n # print'this is my codon'\n #print mycodon\n for j in range(len(codons)):\n for k in range(len(codons[j])):\n #print codons[j][k]\n if codons[j][k] == mycodon:\n #print aa[j]\n amino_acid += aa[j]\n return amino_acid\n \n #step uno break apart string into groups of three\n #find sequence +find index\n #then connect to amino acids ",
"def test_dna_existent_sequence(self):\n pairs = [\n self._create_codon_pair(),\n self._create_codon_pair(),\n self._create_codon_pair(),\n self._create_codon_pair(),\n self._create_codon_pair(),\n self._create_codon_pair(),\n ]\n\n dna = DNA()\n for codon_pair in pairs:\n dna.append(codon_pair)\n\n for index, code_pair in enumerate(dna):\n self.assertEqual(code_pair.sequence, pairs[index].sequence)",
"def test_to_fasta(self):\n even = \"TCAGAT\"\n odd = even + \"AAA\"\n even_dna = self.SequenceClass(even, name=\"even\")\n odd_dna = self.SequenceClass(odd, name=\"odd\")\n self.assertEqual(even_dna.to_fasta(), \">even\\nTCAGAT\\n\")\n # set line wrap to small number so we can test that it works\n self.assertEqual(even_dna.to_fasta(block_size=2), \">even\\nTC\\nAG\\nAT\\n\")\n self.assertEqual(odd_dna.to_fasta(block_size=2), \">odd\\nTC\\nAG\\nAT\\nAA\\nA\\n\")\n # check that changing the linewrap again works\n self.assertEqual(even_dna.to_fasta(block_size=4), \">even\\nTCAG\\nAT\\n\")",
"def Sequence(nbr_by_label, nbr_by_label_test, nbr_comp, plot_graph):\n normal, muta = genere_chains(nbr_comp, 4)\n n_tot = (nbr_by_label + nbr_by_label_test + 1)\n X_n = [mutation(normal, [0.1, 0.1]) for _ in range(n_tot)]\n X_m = [mutation(muta, [0.1, 0.1]) for _ in range(n_tot)]\n X_crash_n = []\n X_crash_m = []\n for seq in X_n:\n crash = []\n for nucleotid in seq:\n crash.append((0 * (nucleotid == 'A') + 1 * (nucleotid == 'C') + 2 * (nucleotid == 'T') + 3 * (\n nucleotid == 'G')) * np.pi / 2)\n X_crash_n.append(crash)\n for seq in X_m:\n crash = []\n for nucleotid in seq:\n crash.append((0 * (nucleotid == 'A') + 1 * (nucleotid == 'C') + 2 * (nucleotid == 'T') + 3 * (\n nucleotid == 'G')) * np.pi / 2)\n X_crash_m.append(crash)\n X_n = np.array(X_crash_n)\n X_m = np.array(X_crash_m)\n if plot_graph:\n plt.scatter(X_n[:, 0][:nbr_by_label], X_n[:, 0][:nbr_by_label])\n plt.scatter(X_m[:, 0][:nbr_by_label], X_m[:, 0][:nbr_by_label])\n\n plt.title(\"ADN sequences\")\n plt.show()\n training_input = {\"N\": X_n[:nbr_by_label], \"M\": X_m[:nbr_by_label]}\n test_input = {\"N\": X_n[nbr_by_label:n_tot], \"M\": X_m[nbr_by_label:n_tot]}\n return [X_n, X_m], training_input, test_input, [\"N\", \"M\"]",
"def __init__(self, seq, peptide):\r\n self.seq = seq # original DNA sequence\r\n self.peptide = peptide # original peptide sequence\r\n self.allPepSeqs = [] # list to hold all possible nuc sequences based on the peptide sequence\r\n self.codonTable = { # holds all amino acids and their associated codons\r\n 'F': ['TTT', 'TTC'], 'S': ['TCT', 'TCC', 'TCA', 'TCG', 'AGT', 'AGC'],\r\n 'Y': ['TAT', 'TAC'], 'C': ['TGT', 'TGC'], 'L': ['TTA', 'TTG', 'CTT', 'CTC', 'CTA', 'CTG'],\r\n '-': ['TAA', 'TGA', 'TAG'], 'W': ['TGG'], 'P': ['CCT', 'CCC', 'CCA', 'CCG'],\r\n 'H': ['CAT', 'CAC'], 'R': ['CGT', 'CGC', 'CGA', 'CGG', 'AGA', 'AGG'], 'Q': ['CAA', 'CAG'],\r\n 'I': ['ATT', 'ATC', 'ATA'], 'T': ['ACT', 'ACC', 'ACA', 'ACG'], 'N': ['AAT', 'AAC'],\r\n 'K': ['AAA', 'AAG'], 'M': ['ATG'], 'V': ['GTT', 'GTC', 'GTA', 'GTG'],\r\n 'A': ['GCT', 'GCC', 'GCA', 'GCG'], 'D': ['GAT', 'GAC'], 'G': ['GGT', 'GGC', 'GGA', 'GGG'],\r\n 'E': ['GAA', 'GAG']\r\n }",
"def convert_to_dna(protein_sequence, wt_protein_dict):\n variant_dna_codons = []\n for index in range(0, len(protein_sequence.seq)):\n wt_aa = str(wt_protein_dict[index + 1][0])\n codon = str(wt_protein_dict[index + 1][1])\n variant_aa = protein_sequence.seq[index]\n if variant_aa != wt_aa:\n if variant_aa is not '-':\n codon = sorted_codon_table[str(variant_aa)][0]\n variant_dna_codons.append(codon)\n variant_dna_str = \"\".join(variant_dna_codons)\n variant_dna_seq = Seq(variant_dna_str, IUPAC.unambiguous_dna)\n variant_dna_seq_obj = SeqRecord(variant_dna_seq, id=protein_sequence.id, name=protein_sequence.name,\n description=protein_sequence.description)\n return variant_dna_seq_obj",
"def test_to_fasta(self):\n even = \"TCAGAT\"\n odd = even + \"AAA\"\n even_dna = self.SEQ(even, name=\"even\")\n odd_dna = self.SEQ(odd, name=\"odd\")\n self.assertEqual(even_dna.to_fasta(), \">even\\nTCAGAT\\n\")\n # set line wrap to small number so we can test that it works\n self.assertEqual(even_dna.to_fasta(block_size=2), \">even\\nTC\\nAG\\nAT\\n\")\n self.assertEqual(odd_dna.to_fasta(block_size=2), \">odd\\nTC\\nAG\\nAT\\nAA\\nA\\n\")\n # check that changing the linewrap again works\n self.assertEqual(even_dna.to_fasta(block_size=4), \">even\\nTCAG\\nAT\\n\")",
"def dna(self):\n return self.seq.replace('U', 'T').replace('u', 't')",
"def hot1_dna(seqs_1hot):\n\n singleton = False\n if seqs_1hot.ndim == 2:\n singleton = True\n seqs_1hot = np.expand_dims(seqs_1hot, 0)\n\n seqs = []\n for si in range(seqs_1hot.shape[0]):\n seq_list = ['A'] * seqs_1hot.shape[1]\n for li in range(seqs_1hot.shape[1]):\n if seqs_1hot[si, li, 0] == 1:\n seq_list[li] = 'A'\n elif seqs_1hot[si, li, 1] == 1:\n seq_list[li] = 'C'\n elif seqs_1hot[si, li, 2] == 1:\n seq_list[li] = 'G'\n elif seqs_1hot[si, li, 3] == 1:\n seq_list[li] = 'T'\n else:\n seq_list[li] = 'N'\n\n seqs.append(''.join(seq_list))\n\n if singleton:\n seqs = seqs[0]\n\n return seqs",
"def codons(self, frame):\n start = frame\n while start + 3 <= self.size:\n yield self.sequence[start : start + 3], start\n start += 3",
"def translate_sequence(rna_sequence, genetic_code):\n #Crate an empty list to store AA sequence:\n AA_list = []\n # Convert all rna_sequence to upper case:\n rna_sequence=rna_sequence.upper()\n # Convert all rna_sequence into a list:\n rna_list = list(rna_sequence)\n # This conditon will run if rna_sequence is at least 3 bases long, and only once it find start codon ,\n #and stop once it finds stop codon.\n while True:\n if len(rna_list) > 2:\n codon=''.join(rna_list[0:3])\n #Delete first 3 bases since its alread added as codon, thus no longer needed.\n del rna_list[0:3]\n else:\n break\n #Using genetic code dictionary to find AA for each corresponding codon:\n AA=genetic_code[codon]\n #Break loop once it finds stop codon\n if AA=='*':\n break\n #Add add translatable AA to the AA_list:\n AA_list.append(AA)\n return ''.join(AA_list)",
"def dna_number(bp_seq):\r\n # Hint: use dna_digit\r\n\r\n # YOUR CODE HERE\r",
"def test_DnaSequence(self):\n x = DnaSequence(\"tcag\")\n # note: no longer preserves case\n self.assertEqual(x, \"TCAG\")\n\n x = DnaSequence(\"aaa\") + DnaSequence(\"ccc\")\n # note: doesn't preserve case\n self.assertEqual(x, \"AAACCC\")\n assert x.moltype is DNA\n self.assertRaises(AlphabetError, x.__add__, \"z\")\n self.assertEqual(DnaSequence(\"TTTAc\").rc(), \"GTAAA\")",
"def coding_strand_to_AA(dna):\n Seq = ''\n for i in range(0,len(dna),3): \n triple = dna[i:i+3]\n print triple\n for k in range(len(codons)):\n if triple in codons[k]: \n print \"Casey Rocks\"\n print codons[k]\n amino = aa[k]\n Seq+=amino\n return Seq",
"def __init__(self, dna):\n self.dna = dna",
"def test_sequence_to_moltype(self):\n s = Sequence(\"TTTTTTTTTTAAAA\", name=\"test1\")\n annot1 = s.add_annotation(Feature, \"exon\", \"fred\", [(0, 10)])\n annot2 = s.add_annotation(Feature, \"exon\", \"trev\", [(10, 14)])\n got = s.to_moltype(\"rna\")\n annot1_slice = str(annot1.get_slice())\n annot2_slice = str(annot2.get_slice())\n got1_slice = str(got.annotations[0].get_slice())\n got2_slice = str(got.annotations[1].get_slice())\n self.assertNotEqual(annot1_slice, got1_slice)\n self.assertEqual(annot2_slice, got2_slice)\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test1\")\n\n s = Sequence(\"AAGGGGAAAACCCCCAAAAAAAAAATTTTTTTTTTAAA\", name=\"test2\")\n xx_y = [[[2, 6], 2.4], [[10, 15], 5.1], [[25, 35], 1.3]]\n y_valued = s.add_annotation(Variable, \"SNP\", \"freq\", xx_y)\n got = s.to_moltype(\"rna\")\n y_valued_slice = str(y_valued.get_slice())\n got_slice = str(str(got.annotations[0].get_slice()))\n self.assertNotEqual(y_valued_slice, got_slice)\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test2\")\n\n s = Sequence(\"TTTTTTTTTTAAAAAAAAAA\", name=\"test3\")\n data = [i for i in range(20)]\n annot4 = s.add_annotation(SimpleVariable, \"SNP\", \"freq\", data)\n got = s.to_moltype(RNA)\n annot4_slice = str(annot4.get_slice())\n got_slice = str(str(got.annotations[0].get_slice()))\n self.assertNotEqual(annot4_slice[:10], got_slice[:10])\n self.assertEqual(annot4_slice[10:20], got_slice[10:20])\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test3\")\n\n # calling with a null object should raise an exception\n with self.assertRaises(ValueError):\n s.to_moltype(None)\n\n with self.assertRaises(ValueError):\n s.to_moltype(\"\")",
"def AnBn(nseq, nT, L, eps=0.5, cue=True, align=False, atfront=True):\n \n p_gram = (1-eps)\n p_nois = eps\n # here's one way to generate the sequences, \n # going to create an empty array, fill it with the valid sequences first\n seqs = -1*np.ones((nseq, nT))\n \n n = int(p_gram*nseq/len(L))\n N = 0\n for l in L:\n \n valid_seqs = np.apply_along_axis(np.repeat, 1, np.repeat([[0,1]],n,0), [l, l])\n \n if align:\n idx = np.arange(0,nT-np.mod(nT,2*l),np.floor(nT/(2*l)))\n idx = np.ones(n,nT)*idx[None,:]\n else:\n idx = np.random.rand(n,nT).argsort(1)[:,:(2*l)]\n idx = np.sort(idx,1)\n np.put_along_axis(seqs[N:N+n,:], idx, valid_seqs, axis=1)\n N+=n\n \n # now I want to add noise sequences, i.e. random number of A and B tokens\n # but I want to make sure that the sparseness of the sequences isn't\n # too different from the grammatical ones -- so I set that manually\n \n thr = sts.norm.ppf(2*np.mean(L)/nT)\n noise_seqs = ((np.ones(nseq-N)[:,None]*np.arange(nT) - np.random.choice(nT-5,(nseq-N,1)))>0).astype(int)\n noise_seqs[np.random.randn(nseq-N,nT)>thr] = -1\n \n seqs[N:,:] = noise_seqs\n labels = (seqs == 0).sum(1) == (seqs==1).sum(1)\n \n if cue:\n seqs = np.append(seqs, np.ones(nseq)[:,None]*2, axis=1)\n if atfront:\n # push to the front\n seqs = np.where(seqs==-1, np.nan, seqs)\n seqs = np.sort(seqs,1)\n seqs = np.where(np.isnan(seqs),-1,seqs)\n \n shf = np.random.choice(nseq,nseq,replace=False)\n seqs = seqs[shf,:]\n labels = labels[shf]\n \n return seqs, labels",
"def bipa(sequence):\n return [_token2clts(segment)[0] for segment in sequence]",
"def build_sequences(dcm):\n dimension_organization_uid = '1.2.276.0.7230010.3.1.4.8323329.20175.1573232544.237437'\n ds0 = Dataset()\n ds0.DimensionOrganizationUID = dimension_organization_uid\n dcm.DimensionOrganizationSequence = Sequence([ds0])\n del ds0\n\n ds1 = Dataset()\n ds1.DimensionOrganizationUID = dimension_organization_uid\n ds1.DimensionIndexPointer = Tag(0x0048021E)\n ds1.FunctionalGroupPointer = Tag(0x0048021A)\n\n ds2 = Dataset()\n ds2.DimensionOrganizationUID = dimension_organization_uid\n ds2.DimensionIndexPointer = Tag(0x0048021F)\n ds2.FunctionalGroupPointer = Tag(0x0048021A)\n\n dcm.DimensionIndexSequence = Sequence([ds1, ds2])\n del ds1, ds2\n\n ds3 = Dataset()\n ds3.XOffsetInSlideCoordinateSystem = 20\n ds3.YOffsetInSlideCoordinateSystem = 40\n dcm.TotalPixelMatrixOriginSequence = Sequence([ds3])\n del ds3\n\n ds4 = Dataset()\n ds5 = Dataset()\n\n # IlluminationTypeCodeSequence\n ds4.CodingSchemeDesignator = 'DCM'\n ds4.CodeMeaning = 'Brightfield illumination'\n ds4.CodeValue = '111744'\n\n # IlluminationColorCodeSequence\n ds5.CodingSchemeDesignator = 'DCM'\n ds5.CodeMeaning = 'No filter'\n ds5.CodeValue = '111609'\n\n ds7 = Dataset()\n ds7.IlluminationTypeCodeSequence = Sequence([ds4])\n ds7.IlluminationColorCodeSequence = Sequence([ds5])\n # noinspection PyPep8,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection,SpellCheckingInspection\n ds7.ICCProfile = b'\\x00\\x00\\x1b\\nlcms\\x020\\x00\\x00mntrRGB XYZ \\x07\\xd4\\x00\\x08\\x00\\r\\x00\\x0c\\x00\\x12\\x00\\x06acspMSFT\\x00\\x00\\x00\\x00lcms\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\xf6\\xd6\\x00\\x01\\x00\\x00\\x00\\x00\\xd3-lcms\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x0cdmnd\\x00\\x00\\x01\\x14\\x00\\x00\\x00jdesc\\x00\\x00\\x01\\x80\\x00\\x00\\x00hdmdd\\x00\\x00\\x01\\xe8\\x00\\x00\\x00hwtpt\\x00\\x00\\x02P\\x00\\x00\\x00\\x14rXYZ\\x00\\x00\\x02d\\x00\\x00\\x00\\x14bXYZ\\x00\\x00\\x02x\\x00\\x00\\x00\\x14gXYZ\\x00\\x00\\x02\\x8c\\x00\\x00\\x00\\x14rTRC\\x00\\x00\\x02\\xa0\\x00\\x00\\x08\\x0cgTRC\\x00\\x00\\n\\xac\\x00\\x00\\x08\\x0cbTRC\\x00\\x00\\x12\\xb8\\x00\\x00\\x08\\x0cchrm\\x00\\x00\\x1a\\xc4\\x00\\x00\\x00$cprt\\x00\\x00\\x1a\\xe8\\x00\\x00\\x00!desc\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x10lcms generated 
\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00desc\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x05sRGB\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00desc\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x05sRGB\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00XYZ \\x00\\x00\\x00\\x00\\x00\\x00\\xf3=\\x00\\x01\\x00\\x00\\x00\\x01\\x16\\x98XYZ \\x00\\x00\\x00\\x00\\x00\\x00o\\x94\\x00\\x008\\xee\\x00\\x00\\x03\\x90XYZ \\x00\\x00\\x00\\x00\\x00\\x00$\\x9d\\x00\\x00\\x0f\\x83\\x00\\x00\\xb6\\xbeXYZ \\x00\\x00\\x00\\x00\\x00\\x00b\\xa5\\x00\\x00\\xb7\\x90\\x00\\x00\\x18\\xdecurv\\x00\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x05\\x00\\n\\x00\\x0f\\x00\\x14\\x00\\x19\\x00\\x1e\\x00#\\x00(\\x00-\\x002\\x007\\x00;\\x00@\\x00E\\x00J\\x00O\\x00T\\x00Y\\x00^\\x00c\\x00h\\x00m\\x00r\\x00w\\x00|\\x00\\x81\\x00\\x86\\x00\\x8b\\x00\\x90\\x00\\x95\\x00\\x9a\\x00\\x9f\\x00\\xa4\\x00\\xa9\\x00\\xae\\x00\\xb2\\x00\\xb7\\x00\\xbc\\x00\\xc1\\x00\\xc6\\x00\\xcb\\x00\\xd0\\x00\\xd5\\x00\\xdb\\x00\\xe0\\x00\\xe5\\x00\\xeb\\x00\\xf0\\x00\\xf6\\x00\\xfb\\x01\\x01\\x01\\x07\\x01\\r\\x01\\x13\\x01\\x19\\x01\\x1f\\x01%\\x01+\\x012\\x018\\x01>\\x01E\\x01L\\x01R\\x01Y\\x01`\\x01g\\x01n\\x01u\\x01|\\x01\\x83\\x01\\x8b\\x01\\x92\\x01\\x9a\\x01\\xa1\\x01\\xa9\\x01\\xb1\\x01\\xb9\\x01\\xc1\\x01\\xc9\\x01\\xd1\\x01\\xd9\\x01\\xe1\\x01\\xe9\\x01\\xf2\\x01\\xfa\\x02\\x03\\x02\\x0c\\x02\\x14\\x02\\x1d\\x02&\\x02/\\x028\\x02A\\x02K\\x02T\\x02]\\x02g\\x02q\\x02z\\x02\\x84\\x02\\x8e\\x02\\x98\\x02\\xa2\\x02\\xac\\x02\\xb6\\x02\\xc1\\x02\\xcb\\x02\\xd5\\x02\\xe0\\x02\\xeb\\x02\\xf5\\x03\\x00\\x03\\x0b\\x03\\x16\\x03!\\x03-\\x038\\x03C\\x03O\\x03Z\\x03f\\x03r\\x03~\\x03\\x8a\\x03\\x96\\x03\\xa2\\x03\\xae\\x03\\xba\\x03\\xc7\\x03\\xd3\\x03\\xe0\\x03\\xec\\x03\\xf9\\x04\\x06\\x04\\x13\\x04 
\\x04-\\x04;\\x04H\\x04U\\x04c\\x04q\\x04~\\x04\\x8c\\x04\\x9a\\x04\\xa8\\x04\\xb6\\x04\\xc4\\x04\\xd3\\x04\\xe1\\x04\\xf0\\x04\\xfe\\x05\\r\\x05\\x1c\\x05+\\x05:\\x05I\\x05X\\x05g\\x05w\\x05\\x86\\x05\\x96\\x05\\xa6\\x05\\xb5\\x05\\xc5\\x05\\xd5\\x05\\xe5\\x05\\xf6\\x06\\x06\\x06\\x16\\x06\\'\\x067\\x06H\\x06Y\\x06j\\x06{\\x06\\x8c\\x06\\x9d\\x06\\xaf\\x06\\xc0\\x06\\xd1\\x06\\xe3\\x06\\xf5\\x07\\x07\\x07\\x19\\x07+\\x07=\\x07O\\x07a\\x07t\\x07\\x86\\x07\\x99\\x07\\xac\\x07\\xbf\\x07\\xd2\\x07\\xe5\\x07\\xf8\\x08\\x0b\\x08\\x1f\\x082\\x08F\\x08Z\\x08n\\x08\\x82\\x08\\x96\\x08\\xaa\\x08\\xbe\\x08\\xd2\\x08\\xe7\\x08\\xfb\\t\\x10\\t%\\t:\\tO\\td\\ty\\t\\x8f\\t\\xa4\\t\\xba\\t\\xcf\\t\\xe5\\t\\xfb\\n\\x11\\n\\'\\n=\\nT\\nj\\n\\x81\\n\\x98\\n\\xae\\n\\xc5\\n\\xdc\\n\\xf3\\x0b\\x0b\\x0b\"\\x0b9\\x0bQ\\x0bi\\x0b\\x80\\x0b\\x98\\x0b\\xb0\\x0b\\xc8\\x0b\\xe1\\x0b\\xf9\\x0c\\x12\\x0c*\\x0cC\\x0c\\\\\\x0cu\\x0c\\x8e\\x0c\\xa7\\x0c\\xc0\\x0c\\xd9\\x0c\\xf3\\r\\r\\r&\\r@\\rZ\\rt\\r\\x8e\\r\\xa9\\r\\xc3\\r\\xde\\r\\xf8\\x0e\\x13\\x0e.\\x0eI\\x0ed\\x0e\\x7f\\x0e\\x9b\\x0e\\xb6\\x0e\\xd2\\x0e\\xee\\x0f\\t\\x0f%\\x0fA\\x0f^\\x0fz\\x0f\\x96\\x0f\\xb3\\x0f\\xcf\\x0f\\xec\\x10\\t\\x10&\\x10C\\x10a\\x10~\\x10\\x9b\\x10\\xb9\\x10\\xd7\\x10\\xf5\\x11\\x13\\x111\\x11O\\x11m\\x11\\x8c\\x11\\xaa\\x11\\xc9\\x11\\xe8\\x12\\x07\\x12&\\x12E\\x12d\\x12\\x84\\x12\\xa3\\x12\\xc3\\x12\\xe3\\x13\\x03\\x13#\\x13C\\x13c\\x13\\x83\\x13\\xa4\\x13\\xc5\\x13\\xe5\\x14\\x06\\x14\\'\\x14I\\x14j\\x14\\x8b\\x14\\xad\\x14\\xce\\x14\\xf0\\x15\\x12\\x154\\x15V\\x15x\\x15\\x9b\\x15\\xbd\\x15\\xe0\\x16\\x03\\x16&\\x16I\\x16l\\x16\\x8f\\x16\\xb2\\x16\\xd6\\x16\\xfa\\x17\\x1d\\x17A\\x17e\\x17\\x89\\x17\\xae\\x17\\xd2\\x17\\xf7\\x18\\x1b\\x18@\\x18e\\x18\\x8a\\x18\\xaf\\x18\\xd5\\x18\\xfa\\x19 \\x19E\\x19k\\x19\\x91\\x19\\xb7\\x19\\xdd\\x1a\\x04\\x1a*\\x1aQ\\x1aw\\x1a\\x9e\\x1a\\xc5\\x1a\\xec\\x1b\\x14\\x1b;\\x1bc\\x1b\\x8a\\x1b\\xb2\\x1b\\xda\\x1c\\x02\\x1c*\\x1cR\\x1c{\\x1c\\xa3\\x1c\\xcc\\x1c\\xf5\\x1d\\x1e\\x1dG\\x1dp\\x1d\\x99\\x1d\\xc3\\x1d\\xec\\x1e\\x16\\x1e@\\x1ej\\x1e\\x94\\x1e\\xbe\\x1e\\xe9\\x1f\\x13\\x1f>\\x1fi\\x1f\\x94\\x1f\\xbf\\x1f\\xea \\x15 A l \\x98 \\xc4 \\xf0!\\x1c!H!u!\\xa1!\\xce!\\xfb\"\\'\"U\"\\x82\"\\xaf\"\\xdd#\\n#8#f#\\x94#\\xc2#\\xf0$\\x1f$M$|$\\xab$\\xda%\\t%8%h%\\x97%\\xc7%\\xf7&\\'&W&\\x87&\\xb7&\\xe8\\'\\x18\\'I\\'z\\'\\xab\\'\\xdc(\\r(?(q(\\xa2(\\xd4)\\x06)8)k)\\x9d)\\xd0*\\x02*5*h*\\x9b*\\xcf+\\x02+6+i+\\x9d+\\xd1,\\x05,9,n,\\xa2,\\xd7-\\x0c-A-v-\\xab-\\xe1.\\x16.L.\\x82.\\xb7.\\xee/$/Z/\\x91/\\xc7/\\xfe050l0\\xa40\\xdb1\\x121J1\\x821\\xba1\\xf22*2c2\\x9b2\\xd43\\r3F3\\x7f3\\xb83\\xf14+4e4\\x9e4\\xd85\\x135M5\\x875\\xc25\\xfd676r6\\xae6\\xe97$7`7\\x9c7\\xd78\\x148P8\\x8c8\\xc89\\x059B9\\x7f9\\xbc9\\xf9:6:t:\\xb2:\\xef;-;k;\\xaa;\\xe8<\\'<e<\\xa4<\\xe3=\"=a=\\xa1=\\xe0> 
>`>\\xa0>\\xe0?!?a?\\xa2?\\xe2@#@d@\\xa6@\\xe7A)AjA\\xacA\\xeeB0BrB\\xb5B\\xf7C:C}C\\xc0D\\x03DGD\\x8aD\\xceE\\x12EUE\\x9aE\\xdeF\"FgF\\xabF\\xf0G5G{G\\xc0H\\x05HKH\\x91H\\xd7I\\x1dIcI\\xa9I\\xf0J7J}J\\xc4K\\x0cKSK\\x9aK\\xe2L*LrL\\xbaM\\x02MJM\\x93M\\xdcN%NnN\\xb7O\\x00OIO\\x93O\\xddP\\'PqP\\xbbQ\\x06QPQ\\x9bQ\\xe6R1R|R\\xc7S\\x13S_S\\xaaS\\xf6TBT\\x8fT\\xdbU(UuU\\xc2V\\x0fV\\\\V\\xa9V\\xf7WDW\\x92W\\xe0X/X}X\\xcbY\\x1aYiY\\xb8Z\\x07ZVZ\\xa6Z\\xf5[E[\\x95[\\xe5\\\\5\\\\\\x86\\\\\\xd6]\\']x]\\xc9^\\x1a^l^\\xbd_\\x0f_a_\\xb3`\\x05`W`\\xaa`\\xfcaOa\\xa2a\\xf5bIb\\x9cb\\xf0cCc\\x97c\\xebd@d\\x94d\\xe9e=e\\x92e\\xe7f=f\\x92f\\xe8g=g\\x93g\\xe9h?h\\x96h\\xeciCi\\x9ai\\xf1jHj\\x9fj\\xf7kOk\\xa7k\\xfflWl\\xafm\\x08m`m\\xb9n\\x12nkn\\xc4o\\x1eoxo\\xd1p+p\\x86p\\xe0q:q\\x95q\\xf0rKr\\xa6s\\x01s]s\\xb8t\\x14tpt\\xccu(u\\x85u\\xe1v>v\\x9bv\\xf8wVw\\xb3x\\x11xnx\\xccy*y\\x89y\\xe7zFz\\xa5{\\x04{c{\\xc2|!|\\x81|\\xe1}A}\\xa1~\\x01~b~\\xc2\\x7f#\\x7f\\x84\\x7f\\xe5\\x80G\\x80\\xa8\\x81\\n\\x81k\\x81\\xcd\\x820\\x82\\x92\\x82\\xf4\\x83W\\x83\\xba\\x84\\x1d\\x84\\x80\\x84\\xe3\\x85G\\x85\\xab\\x86\\x0e\\x86r\\x86\\xd7\\x87;\\x87\\x9f\\x88\\x04\\x88i\\x88\\xce\\x893\\x89\\x99\\x89\\xfe\\x8ad\\x8a\\xca\\x8b0\\x8b\\x96\\x8b\\xfc\\x8cc\\x8c\\xca\\x8d1\\x8d\\x98\\x8d\\xff\\x8ef\\x8e\\xce\\x8f6\\x8f\\x9e\\x90\\x06\\x90n\\x90\\xd6\\x91?\\x91\\xa8\\x92\\x11\\x92z\\x92\\xe3\\x93M\\x93\\xb6\\x94 \\x94\\x8a\\x94\\xf4\\x95_\\x95\\xc9\\x964\\x96\\x9f\\x97\\n\\x97u\\x97\\xe0\\x98L\\x98\\xb8\\x99$\\x99\\x90\\x99\\xfc\\x9ah\\x9a\\xd5\\x9bB\\x9b\\xaf\\x9c\\x1c\\x9c\\x89\\x9c\\xf7\\x9dd\\x9d\\xd2\\x9e@\\x9e\\xae\\x9f\\x1d\\x9f\\x8b\\x9f\\xfa\\xa0i\\xa0\\xd8\\xa1G\\xa1\\xb6\\xa2&\\xa2\\x96\\xa3\\x06\\xa3v\\xa3\\xe6\\xa4V\\xa4\\xc7\\xa58\\xa5\\xa9\\xa6\\x1a\\xa6\\x8b\\xa6\\xfd\\xa7n\\xa7\\xe0\\xa8R\\xa8\\xc4\\xa97\\xa9\\xa9\\xaa\\x1c\\xaa\\x8f\\xab\\x02\\xabu\\xab\\xe9\\xac\\\\\\xac\\xd0\\xadD\\xad\\xb8\\xae-\\xae\\xa1\\xaf\\x16\\xaf\\x8b\\xb0\\x00\\xb0u\\xb0\\xea\\xb1`\\xb1\\xd6\\xb2K\\xb2\\xc2\\xb38\\xb3\\xae\\xb4%\\xb4\\x9c\\xb5\\x13\\xb5\\x8a\\xb6\\x01\\xb6y\\xb6\\xf0\\xb7h\\xb7\\xe0\\xb8Y\\xb8\\xd1\\xb9J\\xb9\\xc2\\xba;\\xba\\xb5\\xbb.\\xbb\\xa7\\xbc!\\xbc\\x9b\\xbd\\x15\\xbd\\x8f\\xbe\\n\\xbe\\x84\\xbe\\xff\\xbfz\\xbf\\xf5\\xc0p\\xc0\\xec\\xc1g\\xc1\\xe3\\xc2_\\xc2\\xdb\\xc3X\\xc3\\xd4\\xc4Q\\xc4\\xce\\xc5K\\xc5\\xc8\\xc6F\\xc6\\xc3\\xc7A\\xc7\\xbf\\xc8=\\xc8\\xbc\\xc9:\\xc9\\xb9\\xca8\\xca\\xb7\\xcb6\\xcb\\xb6\\xcc5\\xcc\\xb5\\xcd5\\xcd\\xb5\\xce6\\xce\\xb6\\xcf7\\xcf\\xb8\\xd09\\xd0\\xba\\xd1<\\xd1\\xbe\\xd2?\\xd2\\xc1\\xd3D\\xd3\\xc6\\xd4I\\xd4\\xcb\\xd5N\\xd5\\xd1\\xd6U\\xd6\\xd8\\xd7\\\\\\xd7\\xe0\\xd8d\\xd8\\xe8\\xd9l\\xd9\\xf1\\xdav\\xda\\xfb\\xdb\\x80\\xdc\\x05\\xdc\\x8a\\xdd\\x10\\xdd\\x96\\xde\\x1c\\xde\\xa2\\xdf)\\xdf\\xaf\\xe06\\xe0\\xbd\\xe1D\\xe1\\xcc\\xe2S\\xe2\\xdb\\xe3c\\xe3\\xeb\\xe4s\\xe4\\xfc\\xe5\\x84\\xe6\\r\\xe6\\x96\\xe7\\x1f\\xe7\\xa9\\xe82\\xe8\\xbc\\xe9F\\xe9\\xd0\\xea[\\xea\\xe5\\xebp\\xeb\\xfb\\xec\\x86\\xed\\x11\\xed\\x9c\\xee(\\xee\\xb4\\xef@\\xef\\xcc\\xf0X\\xf0\\xe5\\xf1r\\xf1\\xff\\xf2\\x8c\\xf3\\x19\\xf3\\xa7\\xf44\\xf4\\xc2\\xf5P\\xf5\\xde\\xf6m\\xf6\\xfb\\xf7\\x8a\\xf8\\x19\\xf8\\xa8\\xf98\\xf9\\xc7\\xfaW\\xfa\\xe7\\xfbw\\xfc\\x07\\xfc\\x98\\xfd)\\xfd\\xba\\xfeK\\xfe\\xdc\\xffm\\xff\\xffcurv\\x00\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x05\\x00\\n\\x00\\x0f\\x00\\x14\\x00\\x19\\x00\\x1e\\x00#\\x00(\\x00-\\x002\\x007\\x00;\\x00@\\x00E\\x00J\\x00O\\x00T\\x00Y\\x00^\\x00c\\x00h\\x00m\\x00r\\x00w\\x00|\\x00\\x81\\x00\\x86\\x00\\x8b\\x00\\x90\\x00\\x95\\x00\\x9a\\x00\\x9f\\x00\
\xa4\\x00\\xa9\\x00\\xae\\x00\\xb2\\x00\\xb7\\x00\\xbc\\x00\\xc1\\x00\\xc6\\x00\\xcb\\x00\\xd0\\x00\\xd5\\x00\\xdb\\x00\\xe0\\x00\\xe5\\x00\\xeb\\x00\\xf0\\x00\\xf6\\x00\\xfb\\x01\\x01\\x01\\x07\\x01\\r\\x01\\x13\\x01\\x19\\x01\\x1f\\x01%\\x01+\\x012\\x018\\x01>\\x01E\\x01L\\x01R\\x01Y\\x01`\\x01g\\x01n\\x01u\\x01|\\x01\\x83\\x01\\x8b\\x01\\x92\\x01\\x9a\\x01\\xa1\\x01\\xa9\\x01\\xb1\\x01\\xb9\\x01\\xc1\\x01\\xc9\\x01\\xd1\\x01\\xd9\\x01\\xe1\\x01\\xe9\\x01\\xf2\\x01\\xfa\\x02\\x03\\x02\\x0c\\x02\\x14\\x02\\x1d\\x02&\\x02/\\x028\\x02A\\x02K\\x02T\\x02]\\x02g\\x02q\\x02z\\x02\\x84\\x02\\x8e\\x02\\x98\\x02\\xa2\\x02\\xac\\x02\\xb6\\x02\\xc1\\x02\\xcb\\x02\\xd5\\x02\\xe0\\x02\\xeb\\x02\\xf5\\x03\\x00\\x03\\x0b\\x03\\x16\\x03!\\x03-\\x038\\x03C\\x03O\\x03Z\\x03f\\x03r\\x03~\\x03\\x8a\\x03\\x96\\x03\\xa2\\x03\\xae\\x03\\xba\\x03\\xc7\\x03\\xd3\\x03\\xe0\\x03\\xec\\x03\\xf9\\x04\\x06\\x04\\x13\\x04 \\x04-\\x04;\\x04H\\x04U\\x04c\\x04q\\x04~\\x04\\x8c\\x04\\x9a\\x04\\xa8\\x04\\xb6\\x04\\xc4\\x04\\xd3\\x04\\xe1\\x04\\xf0\\x04\\xfe\\x05\\r\\x05\\x1c\\x05+\\x05:\\x05I\\x05X\\x05g\\x05w\\x05\\x86\\x05\\x96\\x05\\xa6\\x05\\xb5\\x05\\xc5\\x05\\xd5\\x05\\xe5\\x05\\xf6\\x06\\x06\\x06\\x16\\x06\\'\\x067\\x06H\\x06Y\\x06j\\x06{\\x06\\x8c\\x06\\x9d\\x06\\xaf\\x06\\xc0\\x06\\xd1\\x06\\xe3\\x06\\xf5\\x07\\x07\\x07\\x19\\x07+\\x07=\\x07O\\x07a\\x07t\\x07\\x86\\x07\\x99\\x07\\xac\\x07\\xbf\\x07\\xd2\\x07\\xe5\\x07\\xf8\\x08\\x0b\\x08\\x1f\\x082\\x08F\\x08Z\\x08n\\x08\\x82\\x08\\x96\\x08\\xaa\\x08\\xbe\\x08\\xd2\\x08\\xe7\\x08\\xfb\\t\\x10\\t%\\t:\\tO\\td\\ty\\t\\x8f\\t\\xa4\\t\\xba\\t\\xcf\\t\\xe5\\t\\xfb\\n\\x11\\n\\'\\n=\\nT\\nj\\n\\x81\\n\\x98\\n\\xae\\n\\xc5\\n\\xdc\\n\\xf3\\x0b\\x0b\\x0b\"\\x0b9\\x0bQ\\x0bi\\x0b\\x80\\x0b\\x98\\x0b\\xb0\\x0b\\xc8\\x0b\\xe1\\x0b\\xf9\\x0c\\x12\\x0c*\\x0cC\\x0c\\\\\\x0cu\\x0c\\x8e\\x0c\\xa7\\x0c\\xc0\\x0c\\xd9\\x0c\\xf3\\r\\r\\r&\\r@\\rZ\\rt\\r\\x8e\\r\\xa9\\r\\xc3\\r\\xde\\r\\xf8\\x0e\\x13\\x0e.\\x0eI\\x0ed\\x0e\\x7f\\x0e\\x9b\\x0e\\xb6\\x0e\\xd2\\x0e\\xee\\x0f\\t\\x0f%\\x0fA\\x0f^\\x0fz\\x0f\\x96\\x0f\\xb3\\x0f\\xcf\\x0f\\xec\\x10\\t\\x10&\\x10C\\x10a\\x10~\\x10\\x9b\\x10\\xb9\\x10\\xd7\\x10\\xf5\\x11\\x13\\x111\\x11O\\x11m\\x11\\x8c\\x11\\xaa\\x11\\xc9\\x11\\xe8\\x12\\x07\\x12&\\x12E\\x12d\\x12\\x84\\x12\\xa3\\x12\\xc3\\x12\\xe3\\x13\\x03\\x13#\\x13C\\x13c\\x13\\x83\\x13\\xa4\\x13\\xc5\\x13\\xe5\\x14\\x06\\x14\\'\\x14I\\x14j\\x14\\x8b\\x14\\xad\\x14\\xce\\x14\\xf0\\x15\\x12\\x154\\x15V\\x15x\\x15\\x9b\\x15\\xbd\\x15\\xe0\\x16\\x03\\x16&\\x16I\\x16l\\x16\\x8f\\x16\\xb2\\x16\\xd6\\x16\\xfa\\x17\\x1d\\x17A\\x17e\\x17\\x89\\x17\\xae\\x17\\xd2\\x17\\xf7\\x18\\x1b\\x18@\\x18e\\x18\\x8a\\x18\\xaf\\x18\\xd5\\x18\\xfa\\x19 \\x19E\\x19k\\x19\\x91\\x19\\xb7\\x19\\xdd\\x1a\\x04\\x1a*\\x1aQ\\x1aw\\x1a\\x9e\\x1a\\xc5\\x1a\\xec\\x1b\\x14\\x1b;\\x1bc\\x1b\\x8a\\x1b\\xb2\\x1b\\xda\\x1c\\x02\\x1c*\\x1cR\\x1c{\\x1c\\xa3\\x1c\\xcc\\x1c\\xf5\\x1d\\x1e\\x1dG\\x1dp\\x1d\\x99\\x1d\\xc3\\x1d\\xec\\x1e\\x16\\x1e@\\x1ej\\x1e\\x94\\x1e\\xbe\\x1e\\xe9\\x1f\\x13\\x1f>\\x1fi\\x1f\\x94\\x1f\\xbf\\x1f\\xea \\x15 A l \\x98 \\xc4 
\\xf0!\\x1c!H!u!\\xa1!\\xce!\\xfb\"\\'\"U\"\\x82\"\\xaf\"\\xdd#\\n#8#f#\\x94#\\xc2#\\xf0$\\x1f$M$|$\\xab$\\xda%\\t%8%h%\\x97%\\xc7%\\xf7&\\'&W&\\x87&\\xb7&\\xe8\\'\\x18\\'I\\'z\\'\\xab\\'\\xdc(\\r(?(q(\\xa2(\\xd4)\\x06)8)k)\\x9d)\\xd0*\\x02*5*h*\\x9b*\\xcf+\\x02+6+i+\\x9d+\\xd1,\\x05,9,n,\\xa2,\\xd7-\\x0c-A-v-\\xab-\\xe1.\\x16.L.\\x82.\\xb7.\\xee/$/Z/\\x91/\\xc7/\\xfe050l0\\xa40\\xdb1\\x121J1\\x821\\xba1\\xf22*2c2\\x9b2\\xd43\\r3F3\\x7f3\\xb83\\xf14+4e4\\x9e4\\xd85\\x135M5\\x875\\xc25\\xfd676r6\\xae6\\xe97$7`7\\x9c7\\xd78\\x148P8\\x8c8\\xc89\\x059B9\\x7f9\\xbc9\\xf9:6:t:\\xb2:\\xef;-;k;\\xaa;\\xe8<\\'<e<\\xa4<\\xe3=\"=a=\\xa1=\\xe0> >`>\\xa0>\\xe0?!?a?\\xa2?\\xe2@#@d@\\xa6@\\xe7A)AjA\\xacA\\xeeB0BrB\\xb5B\\xf7C:C}C\\xc0D\\x03DGD\\x8aD\\xceE\\x12EUE\\x9aE\\xdeF\"FgF\\xabF\\xf0G5G{G\\xc0H\\x05HKH\\x91H\\xd7I\\x1dIcI\\xa9I\\xf0J7J}J\\xc4K\\x0cKSK\\x9aK\\xe2L*LrL\\xbaM\\x02MJM\\x93M\\xdcN%NnN\\xb7O\\x00OIO\\x93O\\xddP\\'PqP\\xbbQ\\x06QPQ\\x9bQ\\xe6R1R|R\\xc7S\\x13S_S\\xaaS\\xf6TBT\\x8fT\\xdbU(UuU\\xc2V\\x0fV\\\\V\\xa9V\\xf7WDW\\x92W\\xe0X/X}X\\xcbY\\x1aYiY\\xb8Z\\x07ZVZ\\xa6Z\\xf5[E[\\x95[\\xe5\\\\5\\\\\\x86\\\\\\xd6]\\']x]\\xc9^\\x1a^l^\\xbd_\\x0f_a_\\xb3`\\x05`W`\\xaa`\\xfcaOa\\xa2a\\xf5bIb\\x9cb\\xf0cCc\\x97c\\xebd@d\\x94d\\xe9e=e\\x92e\\xe7f=f\\x92f\\xe8g=g\\x93g\\xe9h?h\\x96h\\xeciCi\\x9ai\\xf1jHj\\x9fj\\xf7kOk\\xa7k\\xfflWl\\xafm\\x08m`m\\xb9n\\x12nkn\\xc4o\\x1eoxo\\xd1p+p\\x86p\\xe0q:q\\x95q\\xf0rKr\\xa6s\\x01s]s\\xb8t\\x14tpt\\xccu(u\\x85u\\xe1v>v\\x9bv\\xf8wVw\\xb3x\\x11xnx\\xccy*y\\x89y\\xe7zFz\\xa5{\\x04{c{\\xc2|!|\\x81|\\xe1}A}\\xa1~\\x01~b~\\xc2\\x7f#\\x7f\\x84\\x7f\\xe5\\x80G\\x80\\xa8\\x81\\n\\x81k\\x81\\xcd\\x820\\x82\\x92\\x82\\xf4\\x83W\\x83\\xba\\x84\\x1d\\x84\\x80\\x84\\xe3\\x85G\\x85\\xab\\x86\\x0e\\x86r\\x86\\xd7\\x87;\\x87\\x9f\\x88\\x04\\x88i\\x88\\xce\\x893\\x89\\x99\\x89\\xfe\\x8ad\\x8a\\xca\\x8b0\\x8b\\x96\\x8b\\xfc\\x8cc\\x8c\\xca\\x8d1\\x8d\\x98\\x8d\\xff\\x8ef\\x8e\\xce\\x8f6\\x8f\\x9e\\x90\\x06\\x90n\\x90\\xd6\\x91?\\x91\\xa8\\x92\\x11\\x92z\\x92\\xe3\\x93M\\x93\\xb6\\x94 
\\x94\\x8a\\x94\\xf4\\x95_\\x95\\xc9\\x964\\x96\\x9f\\x97\\n\\x97u\\x97\\xe0\\x98L\\x98\\xb8\\x99$\\x99\\x90\\x99\\xfc\\x9ah\\x9a\\xd5\\x9bB\\x9b\\xaf\\x9c\\x1c\\x9c\\x89\\x9c\\xf7\\x9dd\\x9d\\xd2\\x9e@\\x9e\\xae\\x9f\\x1d\\x9f\\x8b\\x9f\\xfa\\xa0i\\xa0\\xd8\\xa1G\\xa1\\xb6\\xa2&\\xa2\\x96\\xa3\\x06\\xa3v\\xa3\\xe6\\xa4V\\xa4\\xc7\\xa58\\xa5\\xa9\\xa6\\x1a\\xa6\\x8b\\xa6\\xfd\\xa7n\\xa7\\xe0\\xa8R\\xa8\\xc4\\xa97\\xa9\\xa9\\xaa\\x1c\\xaa\\x8f\\xab\\x02\\xabu\\xab\\xe9\\xac\\\\\\xac\\xd0\\xadD\\xad\\xb8\\xae-\\xae\\xa1\\xaf\\x16\\xaf\\x8b\\xb0\\x00\\xb0u\\xb0\\xea\\xb1`\\xb1\\xd6\\xb2K\\xb2\\xc2\\xb38\\xb3\\xae\\xb4%\\xb4\\x9c\\xb5\\x13\\xb5\\x8a\\xb6\\x01\\xb6y\\xb6\\xf0\\xb7h\\xb7\\xe0\\xb8Y\\xb8\\xd1\\xb9J\\xb9\\xc2\\xba;\\xba\\xb5\\xbb.\\xbb\\xa7\\xbc!\\xbc\\x9b\\xbd\\x15\\xbd\\x8f\\xbe\\n\\xbe\\x84\\xbe\\xff\\xbfz\\xbf\\xf5\\xc0p\\xc0\\xec\\xc1g\\xc1\\xe3\\xc2_\\xc2\\xdb\\xc3X\\xc3\\xd4\\xc4Q\\xc4\\xce\\xc5K\\xc5\\xc8\\xc6F\\xc6\\xc3\\xc7A\\xc7\\xbf\\xc8=\\xc8\\xbc\\xc9:\\xc9\\xb9\\xca8\\xca\\xb7\\xcb6\\xcb\\xb6\\xcc5\\xcc\\xb5\\xcd5\\xcd\\xb5\\xce6\\xce\\xb6\\xcf7\\xcf\\xb8\\xd09\\xd0\\xba\\xd1<\\xd1\\xbe\\xd2?\\xd2\\xc1\\xd3D\\xd3\\xc6\\xd4I\\xd4\\xcb\\xd5N\\xd5\\xd1\\xd6U\\xd6\\xd8\\xd7\\\\\\xd7\\xe0\\xd8d\\xd8\\xe8\\xd9l\\xd9\\xf1\\xdav\\xda\\xfb\\xdb\\x80\\xdc\\x05\\xdc\\x8a\\xdd\\x10\\xdd\\x96\\xde\\x1c\\xde\\xa2\\xdf)\\xdf\\xaf\\xe06\\xe0\\xbd\\xe1D\\xe1\\xcc\\xe2S\\xe2\\xdb\\xe3c\\xe3\\xeb\\xe4s\\xe4\\xfc\\xe5\\x84\\xe6\\r\\xe6\\x96\\xe7\\x1f\\xe7\\xa9\\xe82\\xe8\\xbc\\xe9F\\xe9\\xd0\\xea[\\xea\\xe5\\xebp\\xeb\\xfb\\xec\\x86\\xed\\x11\\xed\\x9c\\xee(\\xee\\xb4\\xef@\\xef\\xcc\\xf0X\\xf0\\xe5\\xf1r\\xf1\\xff\\xf2\\x8c\\xf3\\x19\\xf3\\xa7\\xf44\\xf4\\xc2\\xf5P\\xf5\\xde\\xf6m\\xf6\\xfb\\xf7\\x8a\\xf8\\x19\\xf8\\xa8\\xf98\\xf9\\xc7\\xfaW\\xfa\\xe7\\xfbw\\xfc\\x07\\xfc\\x98\\xfd)\\xfd\\xba\\xfeK\\xfe\\xdc\\xffm\\xff\\xffcurv\\x00\\x00\\x00\\x00\\x00\\x00\\x04\\x00\\x00\\x00\\x00\\x05\\x00\\n\\x00\\x0f\\x00\\x14\\x00\\x19\\x00\\x1e\\x00#\\x00(\\x00-\\x002\\x007\\x00;\\x00@\\x00E\\x00J\\x00O\\x00T\\x00Y\\x00^\\x00c\\x00h\\x00m\\x00r\\x00w\\x00|\\x00\\x81\\x00\\x86\\x00\\x8b\\x00\\x90\\x00\\x95\\x00\\x9a\\x00\\x9f\\x00\\xa4\\x00\\xa9\\x00\\xae\\x00\\xb2\\x00\\xb7\\x00\\xbc\\x00\\xc1\\x00\\xc6\\x00\\xcb\\x00\\xd0\\x00\\xd5\\x00\\xdb\\x00\\xe0\\x00\\xe5\\x00\\xeb\\x00\\xf0\\x00\\xf6\\x00\\xfb\\x01\\x01\\x01\\x07\\x01\\r\\x01\\x13\\x01\\x19\\x01\\x1f\\x01%\\x01+\\x012\\x018\\x01>\\x01E\\x01L\\x01R\\x01Y\\x01`\\x01g\\x01n\\x01u\\x01|\\x01\\x83\\x01\\x8b\\x01\\x92\\x01\\x9a\\x01\\xa1\\x01\\xa9\\x01\\xb1\\x01\\xb9\\x01\\xc1\\x01\\xc9\\x01\\xd1\\x01\\xd9\\x01\\xe1\\x01\\xe9\\x01\\xf2\\x01\\xfa\\x02\\x03\\x02\\x0c\\x02\\x14\\x02\\x1d\\x02&\\x02/\\x028\\x02A\\x02K\\x02T\\x02]\\x02g\\x02q\\x02z\\x02\\x84\\x02\\x8e\\x02\\x98\\x02\\xa2\\x02\\xac\\x02\\xb6\\x02\\xc1\\x02\\xcb\\x02\\xd5\\x02\\xe0\\x02\\xeb\\x02\\xf5\\x03\\x00\\x03\\x0b\\x03\\x16\\x03!\\x03-\\x038\\x03C\\x03O\\x03Z\\x03f\\x03r\\x03~\\x03\\x8a\\x03\\x96\\x03\\xa2\\x03\\xae\\x03\\xba\\x03\\xc7\\x03\\xd3\\x03\\xe0\\x03\\xec\\x03\\xf9\\x04\\x06\\x04\\x13\\x04 
\\x04-\\x04;\\x04H\\x04U\\x04c\\x04q\\x04~\\x04\\x8c\\x04\\x9a\\x04\\xa8\\x04\\xb6\\x04\\xc4\\x04\\xd3\\x04\\xe1\\x04\\xf0\\x04\\xfe\\x05\\r\\x05\\x1c\\x05+\\x05:\\x05I\\x05X\\x05g\\x05w\\x05\\x86\\x05\\x96\\x05\\xa6\\x05\\xb5\\x05\\xc5\\x05\\xd5\\x05\\xe5\\x05\\xf6\\x06\\x06\\x06\\x16\\x06\\'\\x067\\x06H\\x06Y\\x06j\\x06{\\x06\\x8c\\x06\\x9d\\x06\\xaf\\x06\\xc0\\x06\\xd1\\x06\\xe3\\x06\\xf5\\x07\\x07\\x07\\x19\\x07+\\x07=\\x07O\\x07a\\x07t\\x07\\x86\\x07\\x99\\x07\\xac\\x07\\xbf\\x07\\xd2\\x07\\xe5\\x07\\xf8\\x08\\x0b\\x08\\x1f\\x082\\x08F\\x08Z\\x08n\\x08\\x82\\x08\\x96\\x08\\xaa\\x08\\xbe\\x08\\xd2\\x08\\xe7\\x08\\xfb\\t\\x10\\t%\\t:\\tO\\td\\ty\\t\\x8f\\t\\xa4\\t\\xba\\t\\xcf\\t\\xe5\\t\\xfb\\n\\x11\\n\\'\\n=\\nT\\nj\\n\\x81\\n\\x98\\n\\xae\\n\\xc5\\n\\xdc\\n\\xf3\\x0b\\x0b\\x0b\"\\x0b9\\x0bQ\\x0bi\\x0b\\x80\\x0b\\x98\\x0b\\xb0\\x0b\\xc8\\x0b\\xe1\\x0b\\xf9\\x0c\\x12\\x0c*\\x0cC\\x0c\\\\\\x0cu\\x0c\\x8e\\x0c\\xa7\\x0c\\xc0\\x0c\\xd9\\x0c\\xf3\\r\\r\\r&\\r@\\rZ\\rt\\r\\x8e\\r\\xa9\\r\\xc3\\r\\xde\\r\\xf8\\x0e\\x13\\x0e.\\x0eI\\x0ed\\x0e\\x7f\\x0e\\x9b\\x0e\\xb6\\x0e\\xd2\\x0e\\xee\\x0f\\t\\x0f%\\x0fA\\x0f^\\x0fz\\x0f\\x96\\x0f\\xb3\\x0f\\xcf\\x0f\\xec\\x10\\t\\x10&\\x10C\\x10a\\x10~\\x10\\x9b\\x10\\xb9\\x10\\xd7\\x10\\xf5\\x11\\x13\\x111\\x11O\\x11m\\x11\\x8c\\x11\\xaa\\x11\\xc9\\x11\\xe8\\x12\\x07\\x12&\\x12E\\x12d\\x12\\x84\\x12\\xa3\\x12\\xc3\\x12\\xe3\\x13\\x03\\x13#\\x13C\\x13c\\x13\\x83\\x13\\xa4\\x13\\xc5\\x13\\xe5\\x14\\x06\\x14\\'\\x14I\\x14j\\x14\\x8b\\x14\\xad\\x14\\xce\\x14\\xf0\\x15\\x12\\x154\\x15V\\x15x\\x15\\x9b\\x15\\xbd\\x15\\xe0\\x16\\x03\\x16&\\x16I\\x16l\\x16\\x8f\\x16\\xb2\\x16\\xd6\\x16\\xfa\\x17\\x1d\\x17A\\x17e\\x17\\x89\\x17\\xae\\x17\\xd2\\x17\\xf7\\x18\\x1b\\x18@\\x18e\\x18\\x8a\\x18\\xaf\\x18\\xd5\\x18\\xfa\\x19 \\x19E\\x19k\\x19\\x91\\x19\\xb7\\x19\\xdd\\x1a\\x04\\x1a*\\x1aQ\\x1aw\\x1a\\x9e\\x1a\\xc5\\x1a\\xec\\x1b\\x14\\x1b;\\x1bc\\x1b\\x8a\\x1b\\xb2\\x1b\\xda\\x1c\\x02\\x1c*\\x1cR\\x1c{\\x1c\\xa3\\x1c\\xcc\\x1c\\xf5\\x1d\\x1e\\x1dG\\x1dp\\x1d\\x99\\x1d\\xc3\\x1d\\xec\\x1e\\x16\\x1e@\\x1ej\\x1e\\x94\\x1e\\xbe\\x1e\\xe9\\x1f\\x13\\x1f>\\x1fi\\x1f\\x94\\x1f\\xbf\\x1f\\xea \\x15 A l \\x98 \\xc4 \\xf0!\\x1c!H!u!\\xa1!\\xce!\\xfb\"\\'\"U\"\\x82\"\\xaf\"\\xdd#\\n#8#f#\\x94#\\xc2#\\xf0$\\x1f$M$|$\\xab$\\xda%\\t%8%h%\\x97%\\xc7%\\xf7&\\'&W&\\x87&\\xb7&\\xe8\\'\\x18\\'I\\'z\\'\\xab\\'\\xdc(\\r(?(q(\\xa2(\\xd4)\\x06)8)k)\\x9d)\\xd0*\\x02*5*h*\\x9b*\\xcf+\\x02+6+i+\\x9d+\\xd1,\\x05,9,n,\\xa2,\\xd7-\\x0c-A-v-\\xab-\\xe1.\\x16.L.\\x82.\\xb7.\\xee/$/Z/\\x91/\\xc7/\\xfe050l0\\xa40\\xdb1\\x121J1\\x821\\xba1\\xf22*2c2\\x9b2\\xd43\\r3F3\\x7f3\\xb83\\xf14+4e4\\x9e4\\xd85\\x135M5\\x875\\xc25\\xfd676r6\\xae6\\xe97$7`7\\x9c7\\xd78\\x148P8\\x8c8\\xc89\\x059B9\\x7f9\\xbc9\\xf9:6:t:\\xb2:\\xef;-;k;\\xaa;\\xe8<\\'<e<\\xa4<\\xe3=\"=a=\\xa1=\\xe0> 
>`>\\xa0>\\xe0?!?a?\\xa2?\\xe2@#@d@\\xa6@\\xe7A)AjA\\xacA\\xeeB0BrB\\xb5B\\xf7C:C}C\\xc0D\\x03DGD\\x8aD\\xceE\\x12EUE\\x9aE\\xdeF\"FgF\\xabF\\xf0G5G{G\\xc0H\\x05HKH\\x91H\\xd7I\\x1dIcI\\xa9I\\xf0J7J}J\\xc4K\\x0cKSK\\x9aK\\xe2L*LrL\\xbaM\\x02MJM\\x93M\\xdcN%NnN\\xb7O\\x00OIO\\x93O\\xddP\\'PqP\\xbbQ\\x06QPQ\\x9bQ\\xe6R1R|R\\xc7S\\x13S_S\\xaaS\\xf6TBT\\x8fT\\xdbU(UuU\\xc2V\\x0fV\\\\V\\xa9V\\xf7WDW\\x92W\\xe0X/X}X\\xcbY\\x1aYiY\\xb8Z\\x07ZVZ\\xa6Z\\xf5[E[\\x95[\\xe5\\\\5\\\\\\x86\\\\\\xd6]\\']x]\\xc9^\\x1a^l^\\xbd_\\x0f_a_\\xb3`\\x05`W`\\xaa`\\xfcaOa\\xa2a\\xf5bIb\\x9cb\\xf0cCc\\x97c\\xebd@d\\x94d\\xe9e=e\\x92e\\xe7f=f\\x92f\\xe8g=g\\x93g\\xe9h?h\\x96h\\xeciCi\\x9ai\\xf1jHj\\x9fj\\xf7kOk\\xa7k\\xfflWl\\xafm\\x08m`m\\xb9n\\x12nkn\\xc4o\\x1eoxo\\xd1p+p\\x86p\\xe0q:q\\x95q\\xf0rKr\\xa6s\\x01s]s\\xb8t\\x14tpt\\xccu(u\\x85u\\xe1v>v\\x9bv\\xf8wVw\\xb3x\\x11xnx\\xccy*y\\x89y\\xe7zFz\\xa5{\\x04{c{\\xc2|!|\\x81|\\xe1}A}\\xa1~\\x01~b~\\xc2\\x7f#\\x7f\\x84\\x7f\\xe5\\x80G\\x80\\xa8\\x81\\n\\x81k\\x81\\xcd\\x820\\x82\\x92\\x82\\xf4\\x83W\\x83\\xba\\x84\\x1d\\x84\\x80\\x84\\xe3\\x85G\\x85\\xab\\x86\\x0e\\x86r\\x86\\xd7\\x87;\\x87\\x9f\\x88\\x04\\x88i\\x88\\xce\\x893\\x89\\x99\\x89\\xfe\\x8ad\\x8a\\xca\\x8b0\\x8b\\x96\\x8b\\xfc\\x8cc\\x8c\\xca\\x8d1\\x8d\\x98\\x8d\\xff\\x8ef\\x8e\\xce\\x8f6\\x8f\\x9e\\x90\\x06\\x90n\\x90\\xd6\\x91?\\x91\\xa8\\x92\\x11\\x92z\\x92\\xe3\\x93M\\x93\\xb6\\x94 \\x94\\x8a\\x94\\xf4\\x95_\\x95\\xc9\\x964\\x96\\x9f\\x97\\n\\x97u\\x97\\xe0\\x98L\\x98\\xb8\\x99$\\x99\\x90\\x99\\xfc\\x9ah\\x9a\\xd5\\x9bB\\x9b\\xaf\\x9c\\x1c\\x9c\\x89\\x9c\\xf7\\x9dd\\x9d\\xd2\\x9e@\\x9e\\xae\\x9f\\x1d\\x9f\\x8b\\x9f\\xfa\\xa0i\\xa0\\xd8\\xa1G\\xa1\\xb6\\xa2&\\xa2\\x96\\xa3\\x06\\xa3v\\xa3\\xe6\\xa4V\\xa4\\xc7\\xa58\\xa5\\xa9\\xa6\\x1a\\xa6\\x8b\\xa6\\xfd\\xa7n\\xa7\\xe0\\xa8R\\xa8\\xc4\\xa97\\xa9\\xa9\\xaa\\x1c\\xaa\\x8f\\xab\\x02\\xabu\\xab\\xe9\\xac\\\\\\xac\\xd0\\xadD\\xad\\xb8\\xae-\\xae\\xa1\\xaf\\x16\\xaf\\x8b\\xb0\\x00\\xb0u\\xb0\\xea\\xb1`\\xb1\\xd6\\xb2K\\xb2\\xc2\\xb38\\xb3\\xae\\xb4%\\xb4\\x9c\\xb5\\x13\\xb5\\x8a\\xb6\\x01\\xb6y\\xb6\\xf0\\xb7h\\xb7\\xe0\\xb8Y\\xb8\\xd1\\xb9J\\xb9\\xc2\\xba;\\xba\\xb5\\xbb.\\xbb\\xa7\\xbc!\\xbc\\x9b\\xbd\\x15\\xbd\\x8f\\xbe\\n\\xbe\\x84\\xbe\\xff\\xbfz\\xbf\\xf5\\xc0p\\xc0\\xec\\xc1g\\xc1\\xe3\\xc2_\\xc2\\xdb\\xc3X\\xc3\\xd4\\xc4Q\\xc4\\xce\\xc5K\\xc5\\xc8\\xc6F\\xc6\\xc3\\xc7A\\xc7\\xbf\\xc8=\\xc8\\xbc\\xc9:\\xc9\\xb9\\xca8\\xca\\xb7\\xcb6\\xcb\\xb6\\xcc5\\xcc\\xb5\\xcd5\\xcd\\xb5\\xce6\\xce\\xb6\\xcf7\\xcf\\xb8\\xd09\\xd0\\xba\\xd1<\\xd1\\xbe\\xd2?\\xd2\\xc1\\xd3D\\xd3\\xc6\\xd4I\\xd4\\xcb\\xd5N\\xd5\\xd1\\xd6U\\xd6\\xd8\\xd7\\\\\\xd7\\xe0\\xd8d\\xd8\\xe8\\xd9l\\xd9\\xf1\\xdav\\xda\\xfb\\xdb\\x80\\xdc\\x05\\xdc\\x8a\\xdd\\x10\\xdd\\x96\\xde\\x1c\\xde\\xa2\\xdf)\\xdf\\xaf\\xe06\\xe0\\xbd\\xe1D\\xe1\\xcc\\xe2S\\xe2\\xdb\\xe3c\\xe3\\xeb\\xe4s\\xe4\\xfc\\xe5\\x84\\xe6\\r\\xe6\\x96\\xe7\\x1f\\xe7\\xa9\\xe82\\xe8\\xbc\\xe9F\\xe9\\xd0\\xea[\\xea\\xe5\\xebp\\xeb\\xfb\\xec\\x86\\xed\\x11\\xed\\x9c\\xee(\\xee\\xb4\\xef@\\xef\\xcc\\xf0X\\xf0\\xe5\\xf1r\\xf1\\xff\\xf2\\x8c\\xf3\\x19\\xf3\\xa7\\xf44\\xf4\\xc2\\xf5P\\xf5\\xde\\xf6m\\xf6\\xfb\\xf7\\x8a\\xf8\\x19\\xf8\\xa8\\xf98\\xf9\\xc7\\xfaW\\xfa\\xe7\\xfbw\\xfc\\x07\\xfc\\x98\\xfd)\\xfd\\xba\\xfeK\\xfe\\xdc\\xffm\\xff\\xffchrm\\x00\\x00\\x00\\x00\\x00\\x03\\x00\\x00\\x00\\x00\\xa3\\xd7\\x00\\x00T{\\x00\\x00L\\xcd\\x00\\x00\\x99\\x9a\\x00\\x00&f\\x00\\x00\\x0f\\\\text\\x00\\x00\\x00\\x00no copyright, use freely\\x00\\n'\n ds7.OpticalPathIdentifier = '1'\n # noinspection SpellCheckingInspection\n 
ds7.OpticalPathDescription = 'Brightfield'\n\n dcm.OpticalPathSequence = Sequence([ds7])\n del ds7, ds5, ds4\n\n dcm.AcquisitionContextSequence = Sequence([])\n\n ds0 = Dataset()\n ds0.LocalNamespaceEntityID = 'UNKNOWN'\n dcm.IssuerOfTheContainerIdentifierSequence = Sequence([ds0])\n del ds0\n\n ds0 = Dataset()\n\n ds0.SpecimenIdentifier = 'UNKNOWN'\n ds0.SpecimenPreparationSequence = Sequence([])\n ds0.SpecimenUID = generate_uid(prefix=None)\n ds0.IssuerOfTheSpecimenIdentifierSequence = Sequence([])\n dcm.SpecimenDescriptionSequence = Sequence([ds0])\n dcm.ContainerTypeCodeSequence = Sequence([])\n dcm.ContainerIdentifier = 'UNKNOWN'\n return dcm",
"def test_to_dna(self):\n r = self.RNA(\"TCA\")\n self.assertEqual(str(r), \"UCA\")\n self.assertEqual(str(r.to_dna()), \"TCA\")",
"def coding_strand_to_AA(dna):\n num_codons = int(len(dna)/3)\n num = 0\n list_codons = []\n aacids = ''\n while num < num_codons:\n num_start = int(num*3)\n num_end = int(num*3 + 3)\n list_codons.append(dna[num_start:num_end])\n num = num + 1\n for element in list_codons:\n thing = aa_table[element]\n aacids = aacids + thing\n return aacids",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.inframe_cds_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq, in_frame=False), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def main():\n\n args = get_args()\n seq = args.seq.upper()\n codon_to_aa = {\n 'AAA': 'K',\n 'AAC': 'N',\n 'AAG': 'K',\n 'AAU': 'N',\n 'ACA': 'T',\n 'ACC': 'T',\n 'ACG': 'T',\n 'ACU': 'T',\n 'AGA': 'R',\n 'AGC': 'S',\n 'AGG': 'R',\n 'AGU': 'S',\n 'AUA': 'I',\n 'AUC': 'I',\n 'AUG': 'M',\n 'AUU': 'I',\n 'CAA': 'Q',\n 'CAC': 'H',\n 'CAG': 'Q',\n 'CAU': 'H',\n 'CCA': 'P',\n 'CCC': 'P',\n 'CCG': 'P',\n 'CCU': 'P',\n 'CGA': 'R',\n 'CGC': 'R',\n 'CGG': 'R',\n 'CGU': 'R',\n 'CUA': 'L',\n 'CUC': 'L',\n 'CUG': 'L',\n 'CUU': 'L',\n 'GAA': 'E',\n 'GAC': 'D',\n 'GAG': 'E',\n 'GAU': 'D',\n 'GCA': 'A',\n 'GCC': 'A',\n 'GCG': 'A',\n 'GCU': 'A',\n 'GGA': 'G',\n 'GGC': 'G',\n 'GGG': 'G',\n 'GGU': 'G',\n 'GUA': 'V',\n 'GUC': 'V',\n 'GUG': 'V',\n 'GUU': 'V',\n 'UAA': 'Stop',\n 'UAC': 'Y',\n 'UAG': 'Stop',\n 'UAU': 'Y',\n 'UCA': 'S',\n 'UCC': 'S',\n 'UCG': 'S',\n 'UCU': 'S',\n 'UGA': 'Stop',\n 'UGC': 'C',\n 'UGG': 'W',\n 'UGU': 'C',\n 'UUA': 'L',\n 'UUC': 'F',\n 'UUG': 'L',\n 'UUU': 'F',\n }\n\n k = 3\n\n # 1: for loop\n # protein = ''\n # for codon in [seq[i:i + k] for i in range(0, len(seq), k)]:\n # aa = codon_to_aa.get(codon, '-')\n # if aa == 'Stop':\n # break\n # protein += aa\n\n # 2: list comprehension, slice to remove Stop\n # codons = [seq[i:i + k] for i in range(0, len(seq), k)]\n # aa = [codon_to_aa.get(codon, '-') for codon in codons]\n # if 'Stop' in aa:\n # aa = aa[:aa.index('Stop')]\n # print(''.join(aa))\n\n # 3: L.C. -> map(), slice -> takewhile\n # codons = map(lambda i: seq[i:i + k], range(0, len(seq), k))\n # aa = map(lambda codon: codon_to_aa.get(codon, '-'), codons)\n # print(''.join(takewhile(lambda c: c != 'Stop', aa)))\n\n # 4: combine map()\n # aa = map(lambda c: codon_to_aa.get(c, '-'),\n # map(lambda i: seq[i:i + k], range(0, len(seq), k)))\n # print(''.join(takewhile(lambda c: c != 'Stop', aa)))\n\n # 5: combine all\n # print(''.join(\n # takewhile(\n # lambda c: c != 'Stop',\n # map(lambda c: codon_to_aa.get(c, '-'),\n # map(lambda i: seq[i:i + k], range(0, len(seq), k))))))\n\n # 6: Seq\n print(str(Seq(args.seq).translate()).replace('*', ''))"
] | [
"0.7328825",
"0.64691025",
"0.64041036",
"0.6317699",
"0.6250591",
"0.6247037",
"0.62177837",
"0.610478",
"0.6104387",
"0.6039625",
"0.6023548",
"0.6010523",
"0.6005684",
"0.600158",
"0.59115016",
"0.5885456",
"0.5826384",
"0.5823113",
"0.58168215",
"0.5790888",
"0.57801914",
"0.5735055",
"0.5722379",
"0.5698199",
"0.567688",
"0.56602913",
"0.56524646",
"0.5648886",
"0.56440234",
"0.5642263"
] | 0.8081281 | 0 |
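The codon-translation negatives in the record above (coding_strand_to_AA and the codon_to_aa walkthrough) all reduce to the same dictionary-lookup pattern: slice the sequence into triplets, look each one up, and stop at a stop codon. A minimal, self-contained sketch of that pattern, using a hypothetical four-entry table rather than the full genetic code shown in the dataset entry:

```python
# Illustrative only: a tiny, hypothetical subset of the codon table, not the
# full genetic code listed in the dataset entry above.
from itertools import takewhile

CODON_TO_AA = {"AUG": "M", "UUU": "F", "UCA": "S", "UAA": "Stop"}

def translate(rna, table=CODON_TO_AA, k=3):
    """Translate an RNA string codon by codon, stopping at the first stop codon."""
    codons = (rna[i:i + k] for i in range(0, len(rna) - len(rna) % k, k))
    amino_acids = (table.get(codon, "-") for codon in codons)
    return "".join(takewhile(lambda aa: aa != "Stop", amino_acids))

assert translate("AUGUUUUCAUAAUUU") == "MFS"
```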
ArrayRnaCodonSequence should behave as expected | def test_ModelRnaCodonSequence(self):
r = ArrayRnaCodonSequence("UUUCGU")
self.assertEqual(str(r), "UUUCGU")
self.assertEqual(r._data, array([0, 28]))
self.assertEqual(str(r.to_rna()), "UUUCGU")
self.assertEqual(str(r.to_dna()), "TTTCGT") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_ModelDnaCodonSequence(self):\n d = ArrayDnaCodonSequence(\"UUUCGU\")\n self.assertEqual(str(d), \"TTTCGT\")\n self.assertEqual(d._data, array([0, 28]))\n self.assertEqual(str(d.to_rna()), \"UUUCGU\")\n self.assertEqual(str(d.to_dna()), \"TTTCGT\")",
"def codons(self, frame):\n start = frame\n while start + 3 <= self.size:\n yield self.sequence[start : start + 3], start\n start += 3",
"def test_regular_to_model(self):\n r = RNA.make_seq(\"AAA\", name=\"x\")\n s = RNA.make_array_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def to_rna(seq):\n seq=seq.replace('A','U')\n seq=seq.replace('T','A')\n seq=seq.replace('C',\"P\")\n seq=seq.replace('G','C')\n seq=seq.replace('P','G')\n return seq",
"def test_sequence_to_moltype(self):\n s = Sequence(\"TTTTTTTTTTAAAA\", name=\"test1\")\n annot1 = s.add_annotation(Feature, \"exon\", \"fred\", [(0, 10)])\n annot2 = s.add_annotation(Feature, \"exon\", \"trev\", [(10, 14)])\n got = s.to_moltype(\"rna\")\n annot1_slice = str(annot1.get_slice())\n annot2_slice = str(annot2.get_slice())\n got1_slice = str(got.annotations[0].get_slice())\n got2_slice = str(got.annotations[1].get_slice())\n self.assertNotEqual(annot1_slice, got1_slice)\n self.assertEqual(annot2_slice, got2_slice)\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test1\")\n\n s = Sequence(\"AAGGGGAAAACCCCCAAAAAAAAAATTTTTTTTTTAAA\", name=\"test2\")\n xx_y = [[[2, 6], 2.4], [[10, 15], 5.1], [[25, 35], 1.3]]\n y_valued = s.add_annotation(Variable, \"SNP\", \"freq\", xx_y)\n got = s.to_moltype(\"rna\")\n y_valued_slice = str(y_valued.get_slice())\n got_slice = str(str(got.annotations[0].get_slice()))\n self.assertNotEqual(y_valued_slice, got_slice)\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test2\")\n\n s = Sequence(\"TTTTTTTTTTAAAAAAAAAA\", name=\"test3\")\n data = [i for i in range(20)]\n annot4 = s.add_annotation(SimpleVariable, \"SNP\", \"freq\", data)\n got = s.to_moltype(RNA)\n annot4_slice = str(annot4.get_slice())\n got_slice = str(str(got.annotations[0].get_slice()))\n self.assertNotEqual(annot4_slice[:10], got_slice[:10])\n self.assertEqual(annot4_slice[10:20], got_slice[10:20])\n self.assertEqual(got.moltype.label, \"rna\")\n self.assertEqual(got.name, \"test3\")\n\n # calling with a null object should raise an exception\n with self.assertRaises(ValueError):\n s.to_moltype(None)\n\n with self.assertRaises(ValueError):\n s.to_moltype(\"\")",
"def rna(self):\n return self.seq.replace('T', 'U').replace('t', 'u')",
"def encode(self, seq):",
"def _getSequentialRoms(self):\n return self._roms",
"def encode(self,agent_pos,drone_pos):\n\n codeSize = self.width * self.height * 3\n\n array = np.zeros(shape=(self.width, self.height, 3), dtype='uint8')\n\n for j in range(0, self.height):\n for i in range(0, self.width):\n\n v = self.get(i, j)\n\n if v == None:\n continue\n\n array[i, j, 0] = OBJECT_TO_IDX[v.type]\n array[i, j, 1] = COLOR_TO_IDX[v.color]\n\n array[agent_pos[0],agent_pos[1],0]=10\n array[agent_pos[0],agent_pos[1],1]=10\n array[drone_pos[0],drone_pos[1],0]=15\n array[drone_pos[0],drone_pos[1],1]=15\n\n\n\n return array",
"def _rc_seq(self):\n logger.debug(\"Extracting sequences on the reverse strand\")\n sequences_rc = []\n table = str.maketrans({'a': 't', 'c': 'g', 'g': 'c', 't': 'a',\n 'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'})\n for sequence in self.sequences:\n sequences_rc.append(sequence.translate(table)[::-1])\n self.sequences_rc = sequences_rc",
"def i(self):\n return np.array(self._sequence)",
"def translate_sequence(rna_sequence, genetic_code):\n #Crate an empty list to store AA sequence:\n AA_list = []\n # Convert all rna_sequence to upper case:\n rna_sequence=rna_sequence.upper()\n # Convert all rna_sequence into a list:\n rna_list = list(rna_sequence)\n # This conditon will run if rna_sequence is at least 3 bases long, and only once it find start codon ,\n #and stop once it finds stop codon.\n while True:\n if len(rna_list) > 2:\n codon=''.join(rna_list[0:3])\n #Delete first 3 bases since its alread added as codon, thus no longer needed.\n del rna_list[0:3]\n else:\n break\n #Using genetic code dictionary to find AA for each corresponding codon:\n AA=genetic_code[codon]\n #Break loop once it finds stop codon\n if AA=='*':\n break\n #Add add translatable AA to the AA_list:\n AA_list.append(AA)\n return ''.join(AA_list)",
"def encode_rna(x):\n return [0 if y == 'A' else 1 if y == 'U' else 2 if y == 'G' else 3 for y in x]",
"def construct_seq(ind_i):\n track_i = track_list[ind_i]\n select_indices_i = track_i.sample_rois()\n seq_roi_list = [track_i.roi_list[i] for i in select_indices_i]\n return seq_roi_list",
"def test_init(self):\n orig = \"\"\n r = self.SequenceClass(orig)\n self.assertEqual(str(r), orig)\n\n orig = \"TCAGGA\"\n r = self.SequenceClass(orig)\n self.assertEqual(r._data, array([0, 1, 2, 3, 3, 2]))\n self.assertEqual(str(r), orig)",
"def translate(rna):\n RNA_CODON_TABLE = {\"UUU\": \"F\", \"UUC\": \"F\", \"UUA\": \"L\", \"UUG\": \"L\",\n \"UCU\": \"S\", \"UCC\": \"S\", \"UCA\": \"S\", \"UCG\": \"S\",\n \"UAU\": \"Y\", \"UAC\": \"Y\", \"UAA\": \"*\", \"UAG\": \"*\",\n \"UGU\": \"C\", \"UGC\": \"C\", \"UGA\": \"*\", \"UGG\": \"W\",\n \"CUU\": \"L\", \"CUC\": \"L\", \"CUA\": \"L\", \"CUG\": \"L\",\n \"CCU\": \"P\", \"CCC\": \"P\", \"CCA\": \"P\", \"CCG\": \"P\",\n \"CAU\": \"H\", \"CAC\": \"H\", \"CAA\": \"Q\", \"CAG\": \"Q\",\n \"CGU\": \"R\", \"CGC\": \"R\", \"CGA\": \"R\", \"CGG\": \"R\",\n \"AUU\": \"I\", \"AUC\": \"I\", \"AUA\": \"I\", \"AUG\": \"M\",\n \"ACU\": \"T\", \"ACC\": \"T\", \"ACA\": \"T\", \"ACG\": \"T\",\n \"AAU\": \"N\", \"AAC\": \"N\", \"AAA\": \"K\", \"AAG\": \"K\",\n \"AGU\": \"S\", \"AGC\": \"S\", \"AGA\": \"R\", \"AGG\": \"R\",\n \"GUU\": \"V\", \"GUC\": \"V\", \"GUA\": \"V\", \"GUG\": \"V\",\n \"GCU\": \"A\", \"GCC\": \"A\", \"GCA\": \"A\", \"GCG\": \"A\",\n \"GAU\": \"D\", \"GAC\": \"D\", \"GAA\": \"E\", \"GAG\": \"E\",\n \"GGU\": \"G\", \"GGC\": \"G\", \"GGA\": \"G\", \"GGG\": \"G\"}\n str = ''\n list = [rna[i:i+3] for i in range(0,len(rna),3)]\n for x in list:\n #checks if x is in key of RNA_CODON_TABLE\n if x in RNA_CODON_TABLE:\n #appends only if the value for the given key is not *\n if RNA_CODON_TABLE[x] != '*':\n str = str + RNA_CODON_TABLE[x]\n #if only one char is extra(meaning apart form the 3 pair characters available in dictionary)\n #checks if the char is in following\n elif len(x) == 1 and x in ['A','G','C','U']:\n str = str + x\n #if the char is of length 2 i.e, 2 words extra\n elif len(x) == 2 and x[0] in ['A','G','C','U'] and x[1] in ['A','G','C','U']:\n #Then appending the char to the actually converted string\n str = str + x[0]\n str = str + x[1]\n #if the char is not in the above characters then it is a unrecognised character.\n else:\n print(\"Unrecognised character:\",x)\n return str",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq), self.inframe_cds_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq, in_frame=False), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def spot1d_rsa(infile, sequence):\n data = np.loadtxt(infile, usecols=4, skiprows=1).reshape((1, -1, 1))\n for i in range(len(sequence)):\n data[0, i, 0] /= max_solvent_acc[sequence[i].upper()]\n\n return data",
"def get_seq(self): # -> list[Unknown]:\n ...",
"def test_model_to_regular(self):\n r = RNA.make_array_seq(\"AAA\", name=\"x\")\n s = RNA.make_seq(r)\n self.assertEqual(str(s), \"AAA\")\n self.assertEqual(s.moltype, RNA)\n self.assertEqual(s.name, \"x\")",
"def short_training_symbol() -> np.ndarray:\n carriers = [0 + 0j] * 64\n carriers[-32] = 0\n carriers[-31] = 0\n carriers[-30] = 0\n carriers[-29] = 0\n carriers[-28] = 0\n carriers[-27] = 0\n carriers[-26] = 0\n carriers[-25] = 0\n carriers[-24] = 1 + 1j\n carriers[-23] = 0\n carriers[-22] = 0\n carriers[-21] = 0\n carriers[-20] = -1 - 1j\n carriers[-19] = 0\n carriers[-18] = 0\n carriers[-17] = 0\n carriers[-16] = 1 + 1j\n carriers[-15] = 0\n carriers[-14] = 0\n carriers[-13] = 0\n carriers[-12] = -1 - 1j\n carriers[-11] = 0\n carriers[-10] = 0\n carriers[-9] = 0\n carriers[-8] = -1 - 1j\n carriers[-7] = 0\n carriers[-6] = 0\n carriers[-5] = 0\n carriers[-4] = 1 + 1j\n carriers[-3] = 0\n carriers[-2] = 0\n carriers[-1] = 0\n carriers[0] = 0\n carriers[1] = 0\n carriers[2] = 0\n carriers[3] = 0\n carriers[4] = -1 - 1j\n carriers[5] = 0\n carriers[6] = 0\n carriers[7] = 0\n carriers[8] = -1 - 1j\n carriers[9] = 0\n carriers[10] = 0\n carriers[11] = 0\n carriers[12] = 1 + 1j\n carriers[13] = 0\n carriers[14] = 0\n carriers[15] = 0\n carriers[16] = 1 + 1j\n carriers[17] = 0\n carriers[18] = 0\n carriers[19] = 0\n carriers[20] = 1 + 1j\n carriers[21] = 0\n carriers[22] = 0\n carriers[23] = 0\n carriers[24] = 1 + 1j\n carriers[25] = 0\n carriers[26] = 0\n carriers[27] = 0\n carriers[28] = 0\n carriers[29] = 0\n carriers[30] = 0\n carriers[31] = 0\n return np.array(carriers) * np.sqrt(13 / 6)",
"def Sequence(nbr_by_label, nbr_by_label_test, nbr_comp, plot_graph):\n normal, muta = genere_chains(nbr_comp, 4)\n n_tot = (nbr_by_label + nbr_by_label_test + 1)\n X_n = [mutation(normal, [0.1, 0.1]) for _ in range(n_tot)]\n X_m = [mutation(muta, [0.1, 0.1]) for _ in range(n_tot)]\n X_crash_n = []\n X_crash_m = []\n for seq in X_n:\n crash = []\n for nucleotid in seq:\n crash.append((0 * (nucleotid == 'A') + 1 * (nucleotid == 'C') + 2 * (nucleotid == 'T') + 3 * (\n nucleotid == 'G')) * np.pi / 2)\n X_crash_n.append(crash)\n for seq in X_m:\n crash = []\n for nucleotid in seq:\n crash.append((0 * (nucleotid == 'A') + 1 * (nucleotid == 'C') + 2 * (nucleotid == 'T') + 3 * (\n nucleotid == 'G')) * np.pi / 2)\n X_crash_m.append(crash)\n X_n = np.array(X_crash_n)\n X_m = np.array(X_crash_m)\n if plot_graph:\n plt.scatter(X_n[:, 0][:nbr_by_label], X_n[:, 0][:nbr_by_label])\n plt.scatter(X_m[:, 0][:nbr_by_label], X_m[:, 0][:nbr_by_label])\n\n plt.title(\"ADN sequences\")\n plt.show()\n training_input = {\"N\": X_n[:nbr_by_label], \"M\": X_m[:nbr_by_label]}\n test_input = {\"N\": X_n[nbr_by_label:n_tot], \"M\": X_m[nbr_by_label:n_tot]}\n return [X_n, X_m], training_input, test_input, [\"N\", \"M\"]",
"def _generateSequence(self, classifications, detections):\n det_len = len(detections)\n\n # Convert classifications and detections to input required for network\n seq_len = int(self.input_tensor.shape[1])\n fea_len = int(self.input_tensor.shape[2])\n input_data = np.zeros((seq_len,fea_len))\n\n # Add padding before and after sequence based on KEYFRAME_OFFSET\n input_data[:KEYFRAME_OFFSET,0] = np.ones(KEYFRAME_OFFSET)\n input_data[det_len:det_len+KEYFRAME_OFFSET,0] = np.ones(KEYFRAME_OFFSET)\n # Iterate through each frame of the data\n for idx, frame_detections in enumerate(detections):\n # We have already padded before and after\n seq_idx = idx + KEYFRAME_OFFSET\n\n # Skip through frames with no detections\n if len(frame_detections) == 0:\n input_data[seq_idx][0] = 1.0\n continue\n\n detection = frame_detections[0]\n classification = classifications[idx][0]\n\n # Do a size check on input\n # We expect either 1 or 2 models per sequence\n num_species = len(classification.species)\n num_cover = len(classification.cover)\n num_loc = len(detection.location)\n num_fea = num_species + num_cover + num_loc + 2\n num_of_models = int(fea_len / num_fea)\n\n if num_of_models != 2 and num_of_models != 1:\n raise Exception('Bad Feature Length')\n\n # Layout of the feature is:\n # Species, Cover, Normalized Location, Confidence, SSD Species\n # Optional duplicate\n\n for model_idx in range(num_of_models):\n # Calculate indices of vector based on model_idx\n fea_idx = model_idx * num_fea\n species_stop = fea_idx + num_species\n cover_stop = species_stop + num_cover\n loc_stop = cover_stop + num_loc\n ssd_conf = loc_stop\n ssd_species = ssd_conf + 1\n\n input_data[seq_idx,fea_idx:species_stop] = \\\n classification.species\n input_data[seq_idx,species_stop:cover_stop] = \\\n classification.cover\n input_data[seq_idx,cover_stop:loc_stop] = \\\n self._normalizeDetection(detection.location)\n input_data[seq_idx, ssd_conf] = detection.confidence\n input_data[seq_idx, ssd_species] = detection.species\n return input_data",
"def test_sequences(self):\n self.assertEqual(self.t.get_mrna(self.chrom_seq), self.transcript_seq)\n self.assertEqual(self.t.get_cds(self.chrom_seq, in_frame=False), self.cds_seq)\n self.assertEqual(self.t.get_protein_sequence(self.chrom_seq), self.amino_acid)\n self.assertEqual(self.t.get_intron_sequences(self.chrom_seq), self.introns)",
"def long_training_symbol() -> np.ndarray:\n carriers = [0 + 0j] * 64\n carriers[-32] = 0\n carriers[-31] = 0\n carriers[-30] = 0\n carriers[-29] = 0\n carriers[-28] = 0\n carriers[-27] = 0\n carriers[-26] = 1\n carriers[-25] = 1\n carriers[-24] = -1\n carriers[-23] = -1\n carriers[-22] = 1\n carriers[-21] = 1\n carriers[-20] = -1\n carriers[-19] = 1\n carriers[-18] = -1\n carriers[-17] = 1\n carriers[-16] = 1\n carriers[-15] = 1\n carriers[-14] = 1\n carriers[-13] = 1\n carriers[-12] = 1\n carriers[-11] = -1\n carriers[-10] = -1\n carriers[-9] = 1\n carriers[-8] = 1\n carriers[-7] = -1\n carriers[-6] = 1\n carriers[-5] = -1\n carriers[-4] = 1\n carriers[-3] = 1\n carriers[-2] = 1\n carriers[-1] = 1\n carriers[0] = 0\n carriers[1] = 1\n carriers[2] = -1\n carriers[3] = -1\n carriers[4] = 1\n carriers[5] = 1\n carriers[6] = -1\n carriers[7] = 1\n carriers[8] = -1\n carriers[9] = 1\n carriers[10] = -1\n carriers[11] = -1\n carriers[12] = -1\n carriers[13] = -1\n carriers[14] = -1\n carriers[15] = 1\n carriers[16] = 1\n carriers[17] = -1\n carriers[18] = -1\n carriers[19] = 1\n carriers[20] = -1\n carriers[21] = 1\n carriers[22] = -1\n carriers[23] = 1\n carriers[24] = 1\n carriers[25] = 1\n carriers[26] = 1\n carriers[27] = 0\n carriers[28] = 0\n carriers[29] = 0\n carriers[30] = 0\n carriers[31] = 0\n return np.array(carriers)",
"def create_seqeunce_helper(i, text, dsl, char_count, char_to_n, extra, length = seq_length):\n\n seq_int = [] # Sequence mapped to integers\n output_seq = np.zeros((length, char_count)) # Output sequence which will become one item in input array \n\n # Get the next sequence and map its characters to integers\n for v in text[i * length + extra : (i + 1) * length + extra]:\n # If the seed_text is missing a character we append 0\n if v in char_to_n:\n seq_int.append(char_to_n[v])\n else:\n seq_int.append(0)\n\n # For character in sequence\n for j in range(length):\n # Set column corrpsonding to that character to 1\n output_seq[j][seq_int[j]] = 1.0 \n\n return output_seq",
"def test_init(self):\n orig = \"\"\n r = self.SequenceClass(orig)\n self.assertEqual(str(r), orig)\n\n orig = \"TCAGGA\"\n r = self.SequenceClass(orig)\n self.assertEqual(r._data, array([6, 62]))\n self.assertEqual(str(r), orig)",
"def test_gap_array(self):\n r = self.RNA(\"-?A-?NRY-\")\n v = r.gap_array()\n self.assertEqual(v, array([1, 1, 0, 1, 1, 0, 0, 0, 1]))\n r = self.RNA(\"AC\")\n v = r.gap_array()\n self.assertEqual(v, array([0, 0]))\n r = self.RNA(\"-?\")\n v = r.gap_array()\n self.assertEqual(v, array([1, 1]))",
"def _rbSequenceInit(self):\n\n ## send all of this to sequence acq\n if not self.nbFrames:\n self.nbFrames = int(self.duration/self.cycleTime)+1 ## Determine number of frames. (+1) because int round at the lower int\n #nbGreenFrames = self.rbGreenRatio[0] #nb of green frames in each green sequence #NOT YET USED\n nbGreenSequence = float(self.nbFrames)/self.greenFrameInterval #Dividing nbFrames by the green frame interval with a float to have float division\n print('Nb of green frames : ', nbGreenSequence)\n nbGreenSequence = int(round(nbGreenSequence))\n print('Nb of green frames : ', nbGreenSequence)\n #if self.colorMode == SequenceAcquisition.rbColorModes[0]:\n colorSeq=[0,2] #R & B alternation by default\n if self.colorMode == SequenceAcquisition.rbColorModes[1]:\n colorSeq = [0] #Red only mode\n elif self.colorMode == SequenceAcquisition.rbColorModes[2]:\n colorSeq = [2] #Blue only mode\n\n self.ledList = colorSeq*int(round(float(self.nbFrames-nbGreenSequence)/len(colorSeq))) #Initiate a whole list of R-B alternance\n #list.insert(index, elem) -- inserts the element at the given index, shifting elements to the right\n greenSeqIdx = 0\n while greenSeqIdx <= self.nbFrames :\n self.ledList.insert(greenSeqIdx,1)\n greenSeqIdx+= self.greenFrameInterval\n #NB : no return needed because each ledList and nbFrames are instance attribute",
"def sequence(self):\n inigen = IniGen()\n fields = algorithm_fields.algorithms['sequence']\n\n output_uuid_map = {}\n\n # set up global parameters\n algorithm_path = fields['path']\n enabled = \"True\"\n inigen.emit_global(algorithm_path, enabled)\n\n label = \"SEQ\"\n for t in ['C','L']:\n run_label = label+'_'+t\n t1Mag_label = '{0}1MAG'.format(t)\n t2Mag_label = '{0}2MAG'.format(t)\n t3Mag_label = '{0}3MAG'.format(t)\n t1Ang_label = '{0}1ANG'.format(t)\n t2Ang_label = '{0}2ANG'.format(t)\n t3Ang_label = '{0}3ANG'.format(t)\n distillate_label = \"{0}-ALL\".format(t)\n\n # header\n inigen.emit_run_header(run_label, CHUNKING, MINTIME, MAXTIME)\n\n # body\n dep_1Mag_label = t1Mag_label\n dep_1Mag_name = fields['deps'][0]\n dep_1Mag_uuid = self.uuid_map[t1Mag_label]\n\n dep_2Mag_label = t2Mag_label\n dep_2Mag_name = fields['deps'][1]\n dep_2Mag_uuid = self.uuid_map[t2Mag_label]\n\n dep_3Mag_label = t3Mag_label\n dep_3Mag_name = fields['deps'][2]\n dep_3Mag_uuid = self.uuid_map[t3Mag_label]\n\n dep_1Ang_label = t1Ang_label\n dep_1Ang_name = fields['deps'][3]\n dep_1Ang_uuid = self.uuid_map[t1Ang_label]\n\n dep_2Ang_label = t2Ang_label\n dep_2Ang_name = fields['deps'][4]\n dep_2Ang_uuid = self.uuid_map[t2Ang_label]\n\n dep_3Ang_label = t3Ang_label\n dep_3Ang_name = fields['deps'][5]\n dep_3Ang_uuid = self.uuid_map[t3Ang_label]\n \n deps = [[dep_1Mag_label, dep_1Mag_name, dep_1Mag_uuid],\n [dep_2Mag_label, dep_2Mag_name, dep_2Mag_uuid],\n [dep_3Mag_label, dep_3Mag_name, dep_3Mag_uuid],\n [dep_1Ang_label, dep_1Ang_name, dep_1Ang_uuid],\n [dep_2Ang_label, dep_2Ang_name, dep_2Ang_uuid],\n [dep_3Ang_label, dep_3Ang_name, dep_3Ang_uuid]]\n\n param_section_name = fields['params'][0]\n param_section_value = \"Production/{0}/{1}/{2}\".format(self.location, self.name, distillate_label)\n param_name_name = fields['params'][1]\n param_name_value = \"SEQ\"\n params = [[param_section_name, param_section_value], [param_name_name, param_name_value]]\n\n outputs = fields['outputs']\n\n emitted = inigen.emit_run_body(deps, params, outputs)\n\n output_uuid_map[\"ZER_{0}ANG\".format(t)] = emitted[-9][-36:]\n output_uuid_map[\"ZER_{0}MAG\".format(t)] = emitted[-8][-36:]\n output_uuid_map[\"POS_{0}ANG\".format(t)] = emitted[-7][-36:]\n output_uuid_map[\"POS_{0}MAG\".format(t)] = emitted[-6][-36:]\n output_uuid_map[\"NEG_{0}ANG\".format(t)] = emitted[-5][-36:]\n output_uuid_map[\"NEG_{0}MAG\".format(t)] = emitted[-4][-36:]\n output_uuid_map[\"UNB_{0}NEG\".format(t)] = emitted[-3][-36:]\n output_uuid_map[\"UNB_{0}ZER\".format(t)] = emitted[-2][-36:]\n\n filename = \"{0}/SEQ_{1}.ini\".format(self.dirname, self.name)\n inigen.generate_file(filename)\n return output_uuid_map"
] | [
"0.6889651",
"0.616433",
"0.59501624",
"0.59163433",
"0.5860934",
"0.58542854",
"0.5838475",
"0.57773006",
"0.57502055",
"0.57311475",
"0.56619555",
"0.56219095",
"0.55677223",
"0.55440766",
"0.5522619",
"0.55077183",
"0.550424",
"0.5498697",
"0.5479186",
"0.54653597",
"0.54627895",
"0.5458936",
"0.5452506",
"0.5451784",
"0.54463565",
"0.5444817",
"0.5438071",
"0.5429272",
"0.5421376",
"0.5418244"
] | 0.80256546 | 0 |
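The record above asserts that "UUUCGU" encodes to the integer array [0, 28]. A minimal sketch of how such codon codes arise, assuming the T/U=0, C=1, A=2, G=3 base ordering visible in the neighbouring test_init negatives ("TCAGGA" → [6, 62]); this illustrates the encoding only and is not the library's implementation:

```python
# Base ordering assumed from the surrounding test data: T/U=0, C=1, A=2, G=3.
BASE_INDEX = {"U": 0, "T": 0, "C": 1, "A": 2, "G": 3}

def codon_indices(seq):
    """Encode a sequence three bases at a time as base-4 codon indices."""
    return [
        16 * BASE_INDEX[seq[i]] + 4 * BASE_INDEX[seq[i + 1]] + BASE_INDEX[seq[i + 2]]
        for i in range(0, len(seq), 3)
    ]

assert codon_indices("UUUCGU") == [0, 28]  # matches r._data in the test above
assert codon_indices("TTTCGT") == [0, 28]  # the DNA spelling gives the same codes
```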
ArraySequence distance should work with function of indices | def test_distance_indices(self):
s1 = self.RNA("AUGC")
s2 = self.RNA("AAGC")
def f(x, y):
if x == 2 or y == 2:
return 10
return 0
self.assertEqual(s1.distance(s2, f, use_indices=True), 20) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_distance_array(self):\n s1 = Source([[10, 10], [10, 20]], values=[1.0, 2.0])\n assert(s1.distance([20, 25]) == sqrt(200))\n assert(s1.distance(array([20, 25])) == sqrt(200))",
"def _nb_vector_tcrdist(indices, seqs_mat, seqs_L, distance_matrix=tcr_nb_distance_matrix, dist_weight=3, gap_penalty=4, ntrim=3, ctrim=2, fixed_gappos=True):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n for ind_i in nb.prange(indices.shape[0]):\n query_i = indices[ind_i, 0]\n seq_i = indices[ind_i, 1]\n q_L = seqs_L[query_i]\n s_L = seqs_L[seq_i]\n if q_L == s_L:\n \"\"\"No gaps: substitution distance\"\"\"\n for i in range(ntrim, q_L - ctrim):\n dist[ind_i] += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]] * dist_weight\n continue\n\n short_len = min(q_L, s_L)\n len_diff = abs(q_L - s_L)\n if fixed_gappos:\n min_gappos = min(6, 3 + (short_len - 5) // 2)\n max_gappos = min_gappos\n else:\n min_gappos = 5\n max_gappos = short_len - 1 - 4\n while min_gappos > max_gappos:\n min_gappos -= 1\n max_gappos += 1\n min_dist = -1\n # min_count = -1\n for gappos in range(min_gappos, max_gappos + 1):\n tmp_dist = 0\n # tmp_count = 0\n remainder = short_len - gappos\n for n_i in range(ntrim, gappos):\n \"\"\"n_i refers to position relative to N term\"\"\"\n # print (n_i, shortseq[i], longseq[i], distance_matrix[shortseq[i]+longseq[i]])\n tmp_dist += distance_matrix[seqs_mat[query_i, n_i], seqs_mat[seq_i, n_i]]\n # tmp_count += 1\n #print('sequence_distance_with_gappos1:', gappos, remainder, dist[seq_i])\n for c_i in range(ctrim, remainder):\n \"\"\"c_i refers to position relative to C term, counting upwards from C term\"\"\"\n tmp_dist += distance_matrix[seqs_mat[query_i, q_L - 1 - c_i], seqs_mat[seq_i, s_L - 1 - c_i]]\n # tmp_count += 1\n #print('sequence_distance_with_gappos2:', gappos, remainder, dist[seq_i])\n if tmp_dist < min_dist or min_dist == -1:\n min_dist = tmp_dist\n # min_count = tmp_count\n if min_dist == 0:\n break\n dist[ind_i] = min_dist * dist_weight + len_diff * gap_penalty\n return dist",
"def _nb_vector_editdistance(indices, seqs_mat, seqs_L, distance_matrix=identity_nb_distance_matrix, gap_penalty=1):\n assert seqs_mat.shape[0] == seqs_L.shape[0]\n mx_L = nb.int_(np.max(seqs_L))\n\n dist = np.zeros(indices.shape[0], dtype=np.int16)\n \n \"\"\"As long as ldmat is big enough to accomodate the largest sequence\n its OK to only use part of it for the smaller sequences\n NOTE that to create a 2D array it must be created 1D and reshaped\"\"\"\n ldmat = np.zeros(mx_L * mx_L, dtype=np.int16).reshape((mx_L, mx_L))\n for ind_i in nb.prange(indices.shape[0]):\n query_i = indices[ind_i, 0]\n seq_i = indices[ind_i, 1]\n \n q_L = seqs_L[query_i]\n s_L = seqs_L[seq_i]\n if q_L == s_L:\n \"\"\"No gaps: substitution distance\n This will make it differ from a strict edit-distance since\n the optimal edit-distance may insert same number of gaps in both sequences\"\"\"\n #tmp_dist = 0\n for i in range(q_L):\n dist[ind_i] += distance_matrix[seqs_mat[query_i, i], seqs_mat[seq_i, i]]\n #dist[ind_i] = tmp_dist\n continue\n \n \"\"\"Do not need to re-zero each time\"\"\"\n # ldmat = np.zeros((q_L, s_L), dtype=np.int16)\n for row in range(1, q_L):\n ldmat[row, 0] = row * gap_penalty\n\n for col in range(1, s_L):\n ldmat[0, col] = col * gap_penalty\n \n for col in range(1, s_L):\n for row in range(1, q_L):\n ldmat[row, col] = min(ldmat[row-1, col] + gap_penalty,\n ldmat[row, col-1] + gap_penalty,\n ldmat[row-1, col-1] + distance_matrix[seqs_mat[query_i, row-1], seqs_mat[seq_i, col-1]]) # substitution\n dist[ind_i] = ldmat[row, col]\n return dist",
"def distances(self):",
"def distance(self, *args):\n return _osgAnimation.SwigPyIterator_distance(self, *args)",
"def _dist(x, a, w):\n m_xa = 0\n for k in range(len(x)):\n m_xa += (x[k] - a[k])**2 * w[k]\n return m_xa",
"def cal_distances(embeddings):\n # calculate\n dist = np.zeros([len(embeddings), len(embeddings)], dtype=float)\n for ii in xrange(len(embeddings)):\n for jj in xrange(ii + 1, len(embeddings)):\n dist[ii, jj] = np.linalg.norm(embeddings[ii] - embeddings[jj])\n dist[jj, ii] = dist[ii, jj] \n \n # return\n return dist",
"def _computeDistances(self) -> None:\n length = len(self.data)\n for i, sequenceOne in enumerate(self.data):\n print(f\"[SeqCluBaselineOffline] Computing distances is at iteration {i} of {length}.\")\n for j, sequenceTwo in enumerate(self.data):\n if i == j:\n self.distances[i][j] = 0\n continue\n distance = self.distanceMeasure.calculateDistance(sequenceOne, sequenceTwo)\n self.distances[i][j] = distance\n self.distances[j][i] = distance",
"def _distance(pts: npt.ArrayLike, stacked: bool = True) -> float:\n if np.ma.isMaskedArray(pts):\n pts = pts.data\n if not stacked:\n pts = np.transpose(pts)\n nrow, ncol = pts.shape\n result = np.sqrt(np.sum(pts.T @ pts * np.identity(ncol)) / nrow)\n return result",
"def DistanceToIdx(A, B, idx):\n C= A[idx]\n return np.sqrt(np.sum(np.square(C-B),axis=1,keepdims=True))",
"def _distanceToCluster(self, prototypeIndices: ndarray, sequenceIdx: int) -> float:\n return np.sum(self.distances[sequenceIdx][prototypeIndices])",
"def compute_distances(self, X):\n #print(X.shape, self.Xtr.shape)\n dists = np.zeros((X.shape[0], self.Xtr.shape[0]))\n for i in range(X.shape[0]):\n X_r = np.tile(X[i], (self.Xtr.shape[0], 1))\n dists[i] = np.sqrt(np.sum(np.square(self.Xtr - X_r), axis = 1))\n #print(dists.shape)\n return dists",
"def compute_distances(src):\n rr = np.vstack((src[0]['rr'][src[0]['inuse'].astype(np.bool)],\n src[1]['rr'][src[1]['inuse'].astype(np.bool)]))\n return distance.squareform(distance.pdist(rr))",
"def _pairwise_dist(self,seq1,seq2):\n \n return jf.damerau_levenshtein_distance(str(seq1), str(seq2))",
"def distance(self, *args):\n return _libsbml.SwigPyIterator_distance(self, *args)",
"def distance(self, *args):\n return _SALOMERuntime.SALOMERuntime_PySwigIterator_distance(self, *args)",
"def distance(brd1,brd2):\n\n step=brd1[1,0]-brd1[0,0]\n return np.sum(np.abs(brd1[:,1]-brd2[:,1]))*step",
"def distance(sig):\n df_sig = np.diff(sig)\n return np.sum([np.sqrt(1+df**2) for df in df_sig])",
"def distance(self, keyOne, keyTwo):",
"def dist(a, b, i, j):\n return np.sqrt(sqrSum(a, b, i, j))",
"def diff_index_calc(oct_abund_list1, oct_abund_list2):\n rel_index_list = []\n abs_index_list = []\n smty_index_list = []\n for i in range(10):\n abund_data_array = sc.asarray(oct_abund_list1[i], dtype='double')\n abund_sim_array = sc.asarray(oct_abund_list2[i], dtype = 'double')\n \n # make the length of the arrays similar to each other\n if len(abund_data_array) < len(abund_sim_array):\n small_len = abund_data_array\n long_len = abund_sim_array\n else:\n small_len = abund_sim_array\n long_len = abund_data_array\n diff = len(long_len) - len(small_len) \n small_len = sc.append(small_len, [0]*diff)\n \n relative_index_vect = abs(long_len - small_len)/long_len \n rel_index_list.append(sum(relative_index_vect)/len(relative_index_vect))\n \n absolute_index_vect = abs(long_len - small_len)\n abs_index_list.append(sum(absolute_index_vect)/len(absolute_index_vect))\n \n similarity_index_vect = []\n for i in range(len(long_len)):\n similarity_index_vect.append(sc.minimum(long_len[i], small_len[i])/sc.amax([long_len[i], small_len[i]]))\n \n smty_index_list.append(sum(similarity_index_vect)/len(similarity_index_vect)) \n \n rel_index_final = sum(rel_index_list)/10\n abs_index_final = sum(abs_index_list)/10\n smty_index_final = sum(smty_index_list)/10\n \n return (rel_index_final, abs_index_final, smty_index_final)",
"def compute_distance(traj1, traj2, matched_pos):\n distance = np.zeros((len(matched_pos),), dtype=float)\n for i in range(len(matched_pos)):\n if matched_pos[i] == -1:\n continue\n else:\n iou = bbox_overlap(traj1[i, 2:6], traj2[matched_pos[i], 2:6])\n distance[i] = iou\n return distance",
"def test_poincare_distances_batch(self):\n vector_1 = self.vectors['dog.n.01']\n vectors_2 = self.vectors[['mammal.n.01', 'dog.n.01']]\n distances = self.vectors.vector_distance_batch(vector_1, vectors_2)\n self.assertTrue(np.allclose(distances, [4.5278745, 0]))",
"def direct_distance(a, b):\n\n if a[0] == b[0]:\n return abs(a[1] - b[1]) - 1\n if a[1] == b[1]:\n return abs(a[0] - b[0]) - 1\n return abs(a[0] - b[0]) - 1",
"def distance_callback(from_index, to_index):\n from_node = manager.IndexToNode(from_index)\n to_node = manager.IndexToNode(to_index)\n return data['distance_matrix'][from_node][to_node]",
"def _dist(A, B):\n return np.sqrt(np.einsum(\"ijk->ij\", (A[:, None, :] - B) ** 2))",
"def compute_distance(self):\n loc = np.extend_dims(self.state[:, :, Boids.Attr.LOC], axis=-1)\n m = np.tile(loc, (1, 1, self.num_boids))\n pos_diff = m-m.transpose(0, 2, 1)\n self.distance = np.linalg.norm(pos_diff, axis=0)",
"def get_positional_distance(self, other):\n a = self.array_form\n b = other.array_form\n if len(a) != len(b):\n raise ValueError(\"The permutations must be of the same size.\")\n return sum([abs(a[i] - b[i]) for i in xrange(len(a))])",
"def distance_map(xs1, xs2):\n return jax.vmap(lambda x1: jax.vmap(lambda x2: euclidean_distance(x1, x2))(xs2))(xs1)",
"def distance(a, b):\n return (np.sum((a - b)**2))**0.5"
] | [
"0.6781516",
"0.62706697",
"0.6230655",
"0.6199052",
"0.6007264",
"0.5988063",
"0.5955639",
"0.5908289",
"0.588692",
"0.58559555",
"0.579329",
"0.5787277",
"0.57607883",
"0.57606375",
"0.5740236",
"0.5734664",
"0.5719815",
"0.5713297",
"0.56995946",
"0.5692462",
"0.5658256",
"0.56456345",
"0.5635951",
"0.5631063",
"0.5574855",
"0.5554869",
"0.5527248",
"0.5521962",
"0.5510343",
"0.55045605"
] | 0.64148456 | 1 |
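The record above tests a distance computed by summing a user-supplied function over index-encoded positions of two equal-length sequences. A minimal sketch that reproduces the expected value of 20, assuming the same U=0, C=1, A=2, G=3 encoding as the other tests here; the helper below is illustrative, not the tested class's method:

```python
# Index encoding assumed from the surrounding tests: U=0, C=1, A=2, G=3.
BASE_INDEX = {"U": 0, "C": 1, "A": 2, "G": 3}

def index_distance(seq1, seq2, f):
    """Sum f over aligned pairs of base indices."""
    pairs = zip((BASE_INDEX[b] for b in seq1), (BASE_INDEX[b] for b in seq2))
    return sum(f(x, y) for x, y in pairs)

score = lambda x, y: 10 if x == 2 or y == 2 else 0  # same scoring as the test
assert index_distance("AUGC", "AAGC", score) == 20  # index 2 (A) occurs in two columns
```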
Sequence strip_bad_and_gaps should remove gaps and bad chars | def test_strip_bad_and_gaps(self):
# have to turn off check to get bad data in; no longer preserves case
r = self.RNA("ACG--GRN?")
self.assertEqual(r.strip_bad_and_gaps(), "ACGGRN")
r._data[0] = 99
self.assertEqual(r.strip_bad_and_gaps(), "CGGRN") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_strip_bad_and_gaps(self):\n # have to turn off check to get bad data in; no longer preserves case\n self.assertEqual(\n self.RNA(\"UxxCAGwsnyrHBNz#!D-D\", check=False).strip_bad_and_gaps(),\n \"UCAGWSNYRHBNDD\",\n )\n self.assertEqual(\n self.RNA(\"@#^*($@!#&()!@QZX\", check=False).strip_bad_and_gaps(), \"\"\n )\n self.assertEqual(\n self.RNA(\"aaa ggg ---!ccc\", check=False).strip_bad_and_gaps(), \"AAAGGGCCC\"\n )",
"def ungapped(self):\n s = self.sequence\n for sGapChar in GAP_CHARACTERS:\n s = s.replace(sGapChar, '')\n return s",
"def test_strip_bad(self):\n # have to turn off check to get bad data in; no longer preserves case\n self.assertEqual(\n self.RNA(\"UCxxxAGwsnyrHBNzzzD-D\", check=False).strip_bad(),\n \"UCAGWSNYRHBND-D\",\n )\n self.assertEqual(self.RNA(\"@#^*($@!#&()!@QZX\", check=False).strip_bad(), \"\")\n self.assertEqual(\n self.RNA(\"aaaxggg---!ccc\", check=False).strip_bad(), \"AAAGGG---CCC\"\n )",
"def test_strip_bad(self):\n # have to turn off check to get bad data in; no longer preserves case\n r = self.RNA(\"UCAGRYU\")\n r._data[0] = 31\n r._data[2] = 55\n self.assertEqual(r.strip_bad(), \"CGRYU\")",
"def strip_other_charcter():\n pass",
"def cleaning_sequence_regex(sequence):\n amb = re.compile(r\"[^ACGT]\")\n return amb.sub(\"\", sequence)",
"def test_strip_degenerate(self):\n self.assertEqual(self.RNA(\"UCAG-\").strip_degenerate(), \"UCAG-\")\n self.assertEqual(self.RNA(\"NRYSW\").strip_degenerate(), \"\")\n self.assertEqual(self.RNA(\"USNG\").strip_degenerate(), \"UG\")",
"def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text",
"def clean_text_from_nonbasic_characters(text):\n text = re.sub(r\"([^\\u0000-\\u007F])\", \" \", text)\n text = replace_newline_with_space(text).strip()\n text = text.replace(\"_\", \"\")\n text = clean_text_from_multiple_consecutive_whitespaces(text)\n return text",
"def test_consistent_gap_degen_handling(self):\n # the degen character '?' can be a gap, so when we strip either gaps or\n # degen characters it should be gone too\n raw_seq = \"---??-??TC-GGCG-GCA-G-GC-?-C-TAN-GCGC-CCTC-AGGA?-???-??--\"\n raw_ungapped = re.sub(\"[-?]\", \"\", raw_seq)\n raw_no_ambigs = re.sub(\"[N?]+\", \"\", raw_seq)\n dna = self.DNA(raw_seq)\n self.assertEqual(dna.degap(), raw_ungapped)\n self.assertEqual(dna.strip_degenerate(), raw_no_ambigs)\n self.assertEqual(dna.strip_bad_and_gaps(), raw_ungapped)",
"def fix_ending(x):\n x = strip_stoich_wrapper(x)\n x = re.sub(r'(?<=[a-zA-Z])\\-(?=[a-zA-Z]$)', ' ', x)\n return x",
"def _remove_left_padded_special_chars(self, text: str) -> str:\n pattern = re.compile(\"\\ +[^A-Za-z0-9\\n]\")\n text = re.sub(pattern, \" \", text)\n return text",
"def _removeRepetitions(s, encod='utf-8'): \n if not isinstance(s, unicode):\n s = unicode(s, encod,'replace')\n \n # Remove additional caracters \n s = re.sub(r'(\\w)\\1{2,100}', r'\\1', s) \n # Remove additional white spaces \n s = re.sub( '\\s+', ' ', s ).strip() \n \n return s",
"def lstrip(self, chars=None):\n clean = self._clean_string\n raw = self._raw_string\n\n # count continuous sequence of chars from left and right\n nlen = len(clean)\n nlstripped = nlen - len(clean.lstrip(chars))\n # within the stripped regions, only retain parts of the raw\n # string *not* matching the clean string (these are ansi/mxp tags)\n lstripped = \"\"\n ic, ir1 = 0, 0\n while nlstripped:\n if ic >= nlstripped:\n break\n elif raw[ir1] != clean[ic]:\n lstripped += raw[ir1]\n else:\n ic += 1\n ir1 += 1\n return ANSIString(lstripped + raw[ir1:])",
"def strip(self, chars=None):\n clean = self._clean_string\n raw = self._raw_string\n\n # count continuous sequence of chars from left and right\n nlen = len(clean)\n nlstripped = nlen - len(clean.lstrip(chars))\n nrstripped = nlen - len(clean.rstrip(chars))\n\n # within the stripped regions, only retain parts of the raw\n # string *not* matching the clean string (these are ansi/mxp tags)\n lstripped = \"\"\n ic, ir1 = 0, 0\n while nlstripped:\n if ic >= nlstripped:\n break\n elif raw[ir1] != clean[ic]:\n lstripped += raw[ir1]\n else:\n ic += 1\n ir1 += 1\n rstripped = \"\"\n ic, ir2 = nlen - 1, len(raw) - 1\n while nrstripped:\n if nlen - ic > nrstripped:\n break\n elif raw[ir2] != clean[ic]:\n rstripped += raw[ir2]\n else:\n ic -= 1\n ir2 -= 1\n rstripped = rstripped[::-1]\n return ANSIString(lstripped + raw[ir1 : ir2 + 1] + rstripped)",
"def cleaning(string, EOS=False):\n\n # before cleaning up, first identify end of the sentences (EOS)\n if EOS:\n pLu = '[{}]'.format(\"\".join([chr(i) for i in range(sys.maxunicode) if chr(i).isupper()]))\n EOS = re.compile(r'([a-z]+|[ş|ı])(\\. )((' + pLu + '[a-z]?)|([0-9]+))')\n string = EOS.sub(r'\\1#\\3', string)\n\n # period at the end of the sentences are being replaced with hastag (#)\n string = string.lower()\n mapping = {}\n mapping['99_807'] = 231\n mapping['105_770'] = 105\n mapping['117_770'] = 117\n mapping['105_775'] = 105\n mapping['117_776'] = 252\n mapping['115_807'] = 351\n mapping['103_774'] = 287\n mapping['97_770'] = 97\n mapping['111_776'] = 246\n mapping['97_785'] = 97\n Alist = {97, 99, 103, 105, 111, 115, 117}\n solv_prob = []\n flag = False\n for i, c in enumerate(string):\n if flag:\n flag = False\n continue # pass this character\n if not ord(c) in Alist:\n solv_prob.append(c) # no need to check this character\n else:\n if i == len(string) - 1:\n continue\n cn = string[i + 1] # next character\n key = '{}_{}'.format(ord(c), ord(cn)) # creating string with their ordinal\n if key in mapping.keys(): # cheking if this is to be mapped\n solv_prob.append(chr(mapping[key])) # append the mapped character to the list\n flag = True # raising flag to pass next character\n continue\n else:\n solv_prob.append(c)\n\n data = ''.join(solv_prob)\n data = data.replace('iğdır', 'ığdır')\n data = data.replace('irak', 'ırak')\n # Data= [d if len(d) > 0 else '#' for d in data.splitlines()] # removing empty lines\n return data",
"def _strip(s, chars):\n # leading characters\n while len(s) > 0 and s[0] in chars:\n s = s[1:]\n # trailing characters\n while len(s) > 0 and s[-1] in chars:\n s = s[:-1]\n return s",
"def _strip_invalid_xml(s):\n if _badchars_re.search(s):\n return ''.join(c for c in s if c >= ' ' or c in '\\r\\n\\t')\n else:\n return s",
"def _strip_invalid_xml(s):\n if _badchars_re.search(s):\n return ''.join(c for c in s if c >= ' ' or c in '\\r\\n\\t')\n else:\n return s",
"def test_preprocess_bad_chars_in_mapping(self):\r\n\r\n # Should discard all reads due to sequence length being too short\r\n # But should not halt due to bad characters in a data field\r\n\r\n fasta_files = [self.sample_fasta_file]\r\n qual_files = [self.sample_qual_file]\r\n mapping_file = self.sample_mapping_bad_char_datafield_f\r\n barcode_type = \"golay_12\"\r\n min_seq_len = 200\r\n max_seq_len = 1000\r\n min_qual_score = 25\r\n starting_ix = 1\r\n keep_primer = False\r\n max_ambig = 0\r\n max_primer_mm = 1\r\n trim_seq_len = True\r\n dir_prefix = self.output_dir\r\n max_bc_errors = 2\r\n max_homopolymer = 4\r\n retain_unassigned_reads = False\r\n keep_barcode = False\r\n attempt_bc_correction = True\r\n qual_score_window = 0\r\n disable_primer_check = False\r\n reverse_primers = 'disable'\r\n record_qual_scores = False\r\n discard_bad_windows = False\r\n median_length_filtering = None\r\n added_demultiplex_field = None\r\n\r\n preprocess(fasta_files,\r\n qual_files,\r\n mapping_file,\r\n barcode_type,\r\n min_seq_len,\r\n max_seq_len,\r\n min_qual_score,\r\n starting_ix,\r\n keep_primer,\r\n max_ambig,\r\n max_primer_mm,\r\n trim_seq_len,\r\n dir_prefix,\r\n max_bc_errors,\r\n max_homopolymer,\r\n retain_unassigned_reads,\r\n keep_barcode,\r\n attempt_bc_correction,\r\n qual_score_window,\r\n disable_primer_check,\r\n reverse_primers,\r\n record_qual_scores,\r\n discard_bad_windows,\r\n median_length_filtering,\r\n added_demultiplex_field)\r\n\r\n output_seqs = open(dir_prefix + \"seqs.fna\", \"U\")\r\n output_log = open(dir_prefix + \"split_library_log.txt\", \"U\")\r\n output_histograms = open(dir_prefix + \"histograms.txt\", \"U\")\r\n\r\n actual_seqs = [line for line in output_seqs]\r\n actual_log = [line for line in output_log]\r\n actual_histograms = [line for line in output_histograms]\r\n\r\n expected_seqs = []\r\n expected_log = [\r\n 'Number raw input seqs\\t6\\n',\r\n '\\n',\r\n 'Length outside bounds of 200 and 1000\\t6\\n',\r\n 'Num ambiguous bases exceeds limit of 0\\t0\\n',\r\n 'Missing Qual Score\\t0\\n',\r\n 'Mean qual score below minimum of 25\\t0\\n',\r\n 'Max homopolymer run exceeds limit of 4\\t0\\n',\r\n 'Num mismatches in primer exceeds limit of 1: 0\\n',\r\n '\\n',\r\n 'Sequence length details for all sequences passing quality filters:\\n',\r\n 'No sequences passed quality filters for writing.\\n',\r\n '\\n',\r\n 'Barcodes corrected/not\\t0/0\\n',\r\n 'Uncorrected barcodes will not be written to the output fasta file.\\n',\r\n 'Corrected barcodes will be written with the appropriate barcode category.\\n',\r\n 'Corrected but unassigned sequences will not be written unless --retain_unassigned_reads is enabled.\\n',\r\n '\\n',\r\n 'Total valid barcodes that are not in mapping file\\t0\\n',\r\n 'Sequences associated with valid barcodes that are not in the mapping file will not be written.\\n',\r\n '\\n',\r\n 'Barcodes in mapping file\\n',\r\n 'Sample\\tSequence Count\\tBarcode\\n',\r\n 's2\\t0\\tAGAGTCCTGAGC\\n',\r\n 's1\\t0\\tACACATGTCTAC\\n',\r\n 's3\\t0\\tAACTGTGCGTAC\\n',\r\n '\\n',\r\n 'Total number seqs written\\t0']\r\n expected_histograms = [\r\n '# bins raw sequence lengths, length of sequences that pass quality filters before processing, and lengths of sequences that pass quality filters post processing.\\n',\r\n 'Length\\tRaw\\tBefore\\tAfter\\n',\r\n '20\\t2\\t0\\t0\\n',\r\n '30\\t4\\t0\\t0']\r\n\r\n self.assertEqual(actual_seqs, expected_seqs)\r\n self.assertEqual(actual_log, expected_log)\r\n self.assertEqual(actual_histograms, 
expected_histograms)\r\n\r\n '''# With invalid character in a SampleID, should raise ValueError\r\n\r\n fasta_files = [self.sample_fasta_file]\r\n qual_files = [self.sample_qual_file]\r\n mapping_file = self.sample_mapping_bad_char_sampleid_f\r\n barcode_type=\"golay_12\"\r\n min_seq_len=200\r\n max_seq_len=1000\r\n min_qual_score=25\r\n starting_ix=1\r\n keep_primer=False\r\n max_ambig=0\r\n max_primer_mm=1\r\n trim_seq_len=True\r\n dir_prefix=self.output_dir\r\n max_bc_errors=2\r\n max_homopolymer=4\r\n retain_unassigned_reads=False\r\n keep_barcode=False\r\n attempt_bc_correction=True\r\n qual_score_window=0\r\n disable_primer_check=False\r\n reverse_primers='disable'\r\n record_qual_scores=False\r\n discard_bad_windows=False\r\n median_length_filtering=None\r\n added_demultiplex_field=None\r\n\r\n\r\n self.assertRaises(ValueError, preprocess, fasta_files,\r\n qual_files,\r\n mapping_file,\r\n barcode_type,\r\n min_seq_len,\r\n max_seq_len,\r\n min_qual_score,\r\n starting_ix,\r\n keep_primer,\r\n max_ambig,\r\n max_primer_mm,\r\n trim_seq_len,\r\n dir_prefix,\r\n max_bc_errors,\r\n max_homopolymer,\r\n retain_unassigned_reads,\r\n keep_barcode,\r\n attempt_bc_correction,\r\n qual_score_window,\r\n disable_primer_check,\r\n reverse_primers,\r\n record_qual_scores,\r\n discard_bad_windows,\r\n median_length_filtering,\r\n added_demultiplex_field)'''",
"def clean_whitespaces(text):\n length = len(text)\n i = 0\n prev_char = None\n while i < length:\n curr_char = text[i]\n return_char = curr_char if curr_char not in string.whitespace else \" \"\n\n if not (prev_char == \" \" and return_char == \" \"):\n yield return_char\n\n prev_char = return_char\n i += 1",
"def custom_strip(string, char):\n #beginning\n difference = 0\n while len(string) > 0 and string[0] == char:\n string = string[1:]\n difference += 1 #count the number of character removed at the beginning\n #end\n while len(string) > 0 and string[-1] == char:\n string = string[:-1]\n return (string, difference)",
"def rstrip(self, chars=None):\n clean = self._clean_string\n raw = self._raw_string\n nlen = len(clean)\n nrstripped = nlen - len(clean.rstrip(chars))\n rstripped = \"\"\n ic, ir2 = nlen - 1, len(raw) - 1\n while nrstripped:\n if nlen - ic > nrstripped:\n break\n elif raw[ir2] != clean[ic]:\n rstripped += raw[ir2]\n else:\n ic -= 1\n ir2 -= 1\n rstripped = rstripped[::-1]\n return ANSIString(raw[: ir2 + 1] + rstripped)",
"def cleaning_up(self):\n # find all non-letter-no-digit except whitespace and \"-\"\n try:\n pattern = re.compile(\"[a-zA-Z0-9\\\\s\\\\-]\")\n badChars = re.sub(pattern, '', string.printable)\n logging.debug(\"Bad chars: {}\".format(badChars))\n # define translate table\n remap = dict.fromkeys(badChars)\n logging.debug(remap)\n table = str.maketrans(remap)\n result = \"\"\n with open(self.input) as infile:\n lines = (line.strip() for line in infile)\n for line in lines:\n if len(line) == 0:\n continue\n else:\n logging.debug(line)\n result = result + \" \" + line.translate(table)\n # Since the input file only has one line, we can use the following\n # code. For general use, I kept above code.\n # result = line.translate(remap)\n # break;\n except LookupError as e:\n logging.exception(\"Lookup Error: {}\".format(e.strerror))\n except IOError as e:\n logging.exception(\"IO Error: {}\".format(e.strerror))\n except:\n logging.exception(\"Unknown Error\")\n return result.strip()",
"def get_sequence_without_gaps_or_padding(sequence: str) -> str:\n return sequence.replace(dc_constants.GAP_OR_PAD,\n '').replace(dc_constants.GAP_OR_PAD, '')",
"def minimalTextCleaning(row, field):\n\n # force encoding\n encoded_text = row[field].encode(encoding = 'ascii',errors = 'replace')\n decoded_text = encoded_text.decode(encoding='ascii',errors='strict')\n remove_funky_chars = str(decoded_text).replace(\"?\", \" \")\n lower_case = str(remove_funky_chars).lower().strip()\n\n # strip redundant whitespace\n cleaned_text = re.sub(' +', ' ', lower_case)\n\n\n # strip signature lines\n cleaned_text = cleaned_text.replace(\"_\", \"\")\n\n return cleaned_text",
"def remove_bad_chars(val):\n if val == '-':\n return None\n return val",
"def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 0xFFFD or _is_control(char):\n continue # pragma: no cover\n if _is_whitespace(char):\n output.append(\" \")\n else:\n output.append(char)\n return \"\".join(output)",
"def normalize_text(text,pad_punc='!\"#$%&\\'()*+,-./:;<=>?@[\\\\]^_`{|}~',remove_punc='!\"#$%&\\'()*+,-/:;<=>?@[\\\\]^_`{|}~',remove_number='[0-9]',chars=False):\n punc_spaces = re.compile('([%s])' % re.escape(pad_punc))\n punc = re.compile('[%s]' % re.escape(remove_punc))\n text = text.lower()\n if chars:\n text = re.sub(punc,'',text)\n else:\n text = re.sub('\\.{3,}',' dots',text)\n text = re.sub(punc_spaces, r' \\1 ', text)\n text = re.sub(remove_number,'',text)\n text = re.sub(punc,'',text)\n text = re.sub(r'\\b((?![ai])[a-z])\\b','',text)\n text = re.sub('\\s{2,}', ' ', text)\n text = re.sub('\\n', ' ', text)\n text = re.sub('\\t', ' ', text)\n text=text.strip()\n \n return text",
"def _clean_text(self, text):\n output = []\n for char in text:\n cp = ord(char)\n if cp == 0 or cp == 65533 or _is_control(char):\n continue\n if _is_whitespace(char):\n output.append(' ')\n else:\n output.append(char)\n return ''.join(output)"
] | [
"0.76906425",
"0.6872599",
"0.686638",
"0.6582646",
"0.6464975",
"0.6438769",
"0.63625026",
"0.6273571",
"0.6273571",
"0.6253653",
"0.6191777",
"0.6101316",
"0.60952926",
"0.6083035",
"0.602748",
"0.60102254",
"0.59808695",
"0.5978832",
"0.5978832",
"0.59764034",
"0.59651965",
"0.595399",
"0.594863",
"0.5915078",
"0.5907393",
"0.59051",
"0.5902576",
"0.5886974",
"0.5873392",
"0.58502686"
] | 0.7566995 | 1 |
abseq array seq should count characters | def test_count_ab(self):
AB = get_moltype("ab")
seq = AB.make_array_seq("aaba-", alphabet=AB.alphabet.with_gap_motif())
c = seq.counts()
self.assertEqual(c.to_dict(), {"a": 3, "b": 1})
c = seq.counts(allow_gap=True)
self.assertEqual(c.to_dict(), {"a": 3, "b": 1, "-": 1}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_seqeunce_helper(i, text, dsl, char_count, char_to_n, extra, length = seq_length):\n\n seq_int = [] # Sequence mapped to integers\n output_seq = np.zeros((length, char_count)) # Output sequence which will become one item in input array \n\n # Get the next sequence and map its characters to integers\n for v in text[i * length + extra : (i + 1) * length + extra]:\n # If the seed_text is missing a character we append 0\n if v in char_to_n:\n seq_int.append(char_to_n[v])\n else:\n seq_int.append(0)\n\n # For character in sequence\n for j in range(length):\n # Set column corrpsonding to that character to 1\n output_seq[j][seq_int[j]] = 1.0 \n\n return output_seq",
"def count(seq):\n\treturn sum(1 for x in seq)",
"def test_len_seq(self):\n m, seq = DNA.make_seq(\"ACGGT--A\").parse_out_gaps()\n self.assertEqual(len_seq(m), 6)",
"def generate_aa_sequence_for_disp(aa_seq):\n return re.sub(\"(.{50})\", \"\\\\1\\n\", aa_seq, 0, re.DOTALL)",
"def _text_array(text):\n text = \"<mxit/>\" + text\n \n ar = map(ord, text)\n pads = 16 - (len(ar) % 16)\n ar += [0] * (pads-1)\n ar.append(pads)\n \n return ar",
"def __len__(self):\n return len(self.txt_seqs)",
"def __len__(self):\n return len(self.txt_seqs)",
"def __len__(self):\n return len(self.seq)",
"def __len__(self):\n return len(self.seq)",
"def count_rasm(text, system=None):\n\n #\"there are a intersection between subsets\"\n if system == None:\n alphabetMap = dict()\n\n indx = 0\n for char in alphabet:\n alphabetMap.update({char: indx})\n indx = indx + 1\n alphabetMap.update({\" \": 70})\n p=len(alphabet)#+1 #the last one for space char\n\n else:\n for subSys in system:\n if not isinstance(subSys, list):\n raise ValueError (\"system must be list of list not list\")\n if shapeHelper.check_repetation(system):\n raise ValueError(\"there are a repetation in your system\")\n\n p = len(alphabet) - len(list(set(chain(*system)))) + len(system)\n alphabetMap = shape(system)\n n=len(text)\n A=numpy.zeros((n, p), dtype=numpy.int)\n i=0\n j=0\n charCount =[]\n for verse in text:\n verse=shapeHelper.convert_text_to_numbers(verse, alphabetMap)\n for k in range(0,p,1) :\n charCount.insert(j, verse.count(k))\n j+=1\n A[i, :] =charCount\n i+=1\n charCount=[]\n j=0\n\n return A",
"def parse_sura(n, alphabets=['ل', 'ب']):\n # getting the nth sura\n sura = quran.get_sura(n)\n # getting the ndarray dimensions\n a = len(sura)\n m = len(alphabets)\n # building ndarray with appropriate dimensions\n A = numpy.zeros((a,m), dtype=numpy.int)\n\n\n # Filling ndarray with alphabets[] occurrences\n i = 0 # number of current aya\n j = 0 # occurrences\n for aya in sura:\n for letter in alphabets:\n A[i,j] = aya.count(letter)\n j += 1\n j = 0\n i += 1\n\n return A",
"def at_frequency(self):\n result = str(self.seq).count(\"A\") + str(self.seq).count(\"T\")\n return result",
"def Ab_seq(RNs):\n seq = []\n for res in range(cf.nkey):\n randi = RNs.getR()\n for i in range(20):\n if randi < cf.cumprob20[i]:\n seq.append(i + 1) # want amino acids between 1 and 20\n break\n return seq",
"def seq_to_ohm(alph_size,sequence,chars_with_indices):\n matrix=np.zeros((alph_size,len(sequence)),dtype=int)\n for c,char in enumerate(sequence):\n matrix[index_from_char(chars_with_indices, char ),c]+=1\n return matrix",
"def encode(self, seq):",
"def seq_to_array_seq(seq, array_len=60):\n if len(seq) > array_len:\n raise Error('Sequence is too long for the array. Max of %d but %s is %d.' %\n (array_len, seq, len(seq)))\n return '%s%s' % (seq, 'T' * (array_len - len(seq)))",
"def count_ambig(curr_seq, valid_chars='ATCG'):\r\n up_seq = curr_seq.upper()\r\n total = 0\r\n for vchar in valid_chars:\r\n total += up_seq.count(vchar)\r\n return len(curr_seq) - total",
"def _get_num_chars(a):\n if issubclass(a.dtype.type, str_):\n return a.itemsize // 4\n return a.itemsize",
"def __len__(self):\n return self.total_num_sequences",
"def get_seq_lenght(seq_arry, end_symbol):\n scale_arry = np.argmax(seq_arry, axis=2) + np.sum(seq_arry, axis=2)\n end_symbol_scale = np.argmax(end_symbol) + np.sum(end_symbol)\n cond = (scale_arry != end_symbol_scale).astype(np.int)\n lens = cond.sum(axis=1)\n return lens",
"def freqs_from_aln_array(seqs):\n result = None\n for label, seq in MinimalFastaParser(seqs):\n # Currently cogent does not support . characters for gaps, converting\n # to - characters for compatability.\n seq = ModelDnaSequence(seq.replace('.','-'))\n if result is None:\n result = zeros((len(seq.Alphabet), len(seq)),dtype=int)\n indices = arange(len(seq), dtype=int)\n result[seq._data,indices] += 1\n return Profile(result, seq.Alphabet)",
"def test_compress_max_1_seq_len(self):\n a_int = ord('a')\n seq = ''.join(map(chr, range(a_int, a_int + LZ77.max_seq + 1)))\n text = seq + '12' + seq + '1234'\n actual = LZ77.compress(text)\n expected = bytearray([0]) + bytearray(text[:8], 'utf-8')\\\n + bytearray([0]) + bytearray(text[8: 16], 'utf-8')\\\n + bytearray([12]) + bytearray(text[16: 20], 'utf-8')\\\n + bytearray([1, 63]) + bytearray([1, 49])\\\n + bytearray('34', 'utf-8')\n self.assertEqual(actual, expected)",
"def main():\n next_val_string = '1'\n\n for counter in range(0, 31):\n print(\"{}:\\t[{}]\".format(counter, len(next_val_string)))\n next_val_list = get_next_in_seq(next_val_string)\n next_val_string = count_array_to_string(next_val_list)\n\n # and so it ends with 5808",
"def _seq2vec(seq):\n seq=_substitute_opening_gap_char(seq)\n mapper = dict([(r, i) for i, r in enumerate(aalist)])\n naa = len(seq)\n naa_types = len(aalist)\n vec = np.zeros((naa, naa_types))\n for i, res in enumerate(list(seq)):\n ind = mapper[res]\n vec[i, ind] = 1.\n return vec",
"def aa(seq):\n global codontable\n seq = seq.upper()\n if codontable is None:\n # TODO: figure out the right place for the pre-computed information here\n bases = ['T', 'C', 'A', 'G']\n codons = [a+b+c for a in bases for b in bases for c in bases]\n codons = codons + list(map(lambda x: x.lower(), codons))\n amino_acids = 'FFLLSSSSYY**CC*WLLLLPPPPHHQQRRRRIIIMTTTTNNKKSSRRVVVVAAAADDEEGGGG'\n amino_acids = amino_acids + amino_acids.lower()\n codontable = dict(zip(codons, amino_acids))\n res = ''\n for i in range(0, len(seq) - 2, 3):\n res += codontable[seq[i:(i+3)]]\n return res",
"def count_matrix(pb_seq):\n assert_same_size(pb_seq)\n pb_count = numpy.zeros((len(pb_seq[0]), len(NAMES)))\n for seq in pb_seq:\n for idx, block in enumerate(seq):\n if block in NAMES:\n pb_count[idx, NAMES.index(block)] += 1.0\n elif block not in [\"Z\", \"z\"]:\n raise InvalidBlockError(block=block)\n return pb_count",
"def seq2bytes (xs):\n count = 0\n byte = ByteContext()\n for x in xs:\n for i in range(0, x):\n if not byte.write(1):\n count += 1\n yield byte.pack()\n if not byte.write(0):\n count += 1\n yield byte.pack()\n if byte.count != 0:\n yield byte.pad()\n while count < 9:\n count += 1\n yield byte.pad()",
"def get_next_in_seq(value_string):\n result_arr = []\n cur_char = value_string[0]\n count = 1\n for next_char in value_string[1:]:\n if cur_char == next_char:\n count += 1\n else:\n result_arr.append((count, cur_char))\n count = 1\n # fun fact - if you don't reset the count,\n # you turn your laptop into a space heater (oops)\n cur_char = next_char\n\n # and take care of the last ones\n result_arr.append((count, cur_char))\n\n return result_arr",
"def __init__(self, seq, annotation=False):\n self.seq = seq\n self.length = len(seq)\n self.annotation = annotation",
"def test_compress_max_seq_len(self):\n a_int = ord('a')\n seq = ''.join(map(chr, range(a_int, a_int + LZ77.max_seq)))\n text = '123' + seq + '345' + seq\n actual = LZ77.compress(text)\n expected = bytearray([0]) + bytearray(text[:8], 'utf-8')\\\n + bytearray([0]) + bytearray(text[8: 16], 'utf-8')\\\n + bytearray([1]) + bytearray(text[16: 23], 'utf-8')\\\n + bytearray([1, 63])\n self.assertEqual(actual, expected)"
] | [
"0.65561444",
"0.6134673",
"0.60102427",
"0.59734315",
"0.59293884",
"0.5833849",
"0.5833849",
"0.58157563",
"0.58157563",
"0.5804842",
"0.5782722",
"0.5716325",
"0.57094294",
"0.5637024",
"0.56230783",
"0.5609562",
"0.5594264",
"0.55887735",
"0.55783176",
"0.5576027",
"0.5574922",
"0.5573713",
"0.55724686",
"0.5566965",
"0.5564622",
"0.5541442",
"0.5539099",
"0.55379707",
"0.5534376",
"0.5521977"
] | 0.63717175 | 1 |
Loads .refFiles within folder and returns Benchmark objects. | def load_from_folder(folder):
refs = []
for input_file in os.listdir(folder):
if input_file.endswith(".ref"):
refs.append(Reference(folder, input_file))
return refs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, benchmark_dir):\n self.__benchmark_dir = benchmark_dir\n self.__load_configs()\n self.__load_results()",
"def process(self, args):\n for benchmark_file in args.benchmark_files:\n self.process_individual_file(benchmark_file)\n self.total_files += 1",
"def load_files_to_compare(self):\n self.first_source_data = load_path(self.path1)\n self.second_source_data = load_path(self.path2)",
"def load(cls, conf, regex=None):\n try:\n path = cls.get_benchmark_file_path(conf.results_dir)\n if not os.path.isfile(path):\n raise util.UserError(f\"Benchmark list file {path} missing!\")\n d = util.load_json(path, api_version=cls.api_version)\n benchmarks = d.values()\n return cls(conf, benchmarks, regex=regex)\n except util.UserError as err:\n if \"asv update\" in str(err):\n # Don't give conflicting instructions\n raise\n raise util.UserError(\"{}\\nUse `asv run --bench just-discover` to \"\n \"regenerate benchmarks.json\".format(str(err)))",
"def load_data_from_files(self):\n # separated method to allow mock easier\n logger.info(\"Loading data...\")\n parent = Path(__file__).parent\n path = parent / \"resources\" / \"scores.txt\"\n self.scorer.load_from_file(path)\n path = parent / \"resources\" / \"american-english-large.txt\"\n self.trie.load_from_file(path)\n path = parent / \"resources\" / \"reels.txt\"\n self.reels = Reel.get_from_file(path)\n logger.info(\"Data loaded!\")",
"def find_reference_files():\n for root, _, files in os.walk(\"./tests/references/\"):\n for basename in fnmatch.filter(files, \"*.tex\"):\n yield os.path.join(root, basename)",
"def get_all_benchmarks():\n all_benchmarks = []\n for benchmark in os.listdir(BENCHMARKS_DIR):\n benchmark_path = os.path.join(BENCHMARKS_DIR, benchmark)\n if os.path.isfile(os.path.join(benchmark_path, 'benchmark.yaml')):\n all_benchmarks.append(benchmark)\n return sorted(all_benchmarks)",
"def _load_files(self):\n for filedoc in self._docset.get_files():\n path = filedoc.get_path()\n if not path:\n # In case of only partially loaded file information,\n # the path information is not set for unloaded files.\n continue\n if not os.path.isabs(path):\n path = os.path.join(self._source_root, path)\n extension = os.path.splitext(path)[1]\n # We don't care about Markdown files that only produce pages\n # (and fail the directory check below).\n if extension == '.md':\n continue\n dirdoc = filedoc.get_directory()\n if not dirdoc:\n self._reporter.xml_assert(filedoc.get_xml_path(),\n \"file is not in any directory in Doxygen\")\n continue\n relpath = self._get_rel_path(path)\n fileobj = self._files.get(relpath)\n if not fileobj:\n fileobj = File(path, relpath, self._docmap[dirdoc])\n self._files[relpath] = fileobj\n fileobj.set_doc_xml(filedoc, self)\n self._docmap[filedoc] = fileobj",
"def read_reference_data():\n return {f:read_local_file(f) for f in os.listdir(DATA_DIR)}",
"def execute(self, sample_files: pd.DataFrame, reference_file: Path, ncores: int = 1) -> ExecutorResults:\n pass",
"def get_files(self):\n def _get_files_by_names(files, name_set, postfix):\n ret = []\n for f in files: \n name = osp.basename(f).split(\"_%s\" % postfix)[0]\n if name in name_set:\n ret.append(f)\n return ret\n\n frame1_files = sorted(glob.glob(osp.join(self.root, 'images', \"*_pre_disaster*\")))\n frame2_files = sorted(glob.glob(osp.join(self.root, \"images\", \"*_post_disaster*\")))\n label_files = sorted(glob.glob(osp.join(self.root, \"masks\", \"*_change*\")))\n assert len(frame1_files) == len(frame2_files) == len(label_files), \\\n \"%d, %d, %d\" % (len(frame1_files), len(frame2_files), len(label_files))\n\n file_names = [osp.basename(f).split(\"_pre\")[0] for f in frame1_files]\n file_names = sorted(list(set(file_names)))\n if self.isTrain:\n name_set = train_test_split(file_names, train_size=0.8, random_state=0)[0]\n else: \n name_set = train_test_split(file_names, train_size=0.8, random_state=0)[1]\n self.frame1_files = _get_files_by_names(frame1_files, name_set, 'pre')\n self.frame2_files = _get_files_by_names(frame2_files, name_set, 'post')\n self.label_files = _get_files_by_names(label_files, name_set, 'change')",
"def getFileReferences():\n refNodes = pm.ls(rf=True)\n fileRefs = [r.referenceFile() for r in refNodes]\n return fileRefs",
"def _precache_reference_files(self, input_file):\n from .. import datamodels\n gc.collect()\n if self._is_association_file(input_file):\n return\n try:\n with datamodels.open(input_file) as model:\n pass\n except (ValueError, TypeError, IOError):\n self.log.info(\n 'First argument {0} does not appear to be a '\n 'model'.format(input_file))\n else:\n super(Pipeline, self)._precache_reference_files(input_file)\n for name in self.step_defs.keys():\n step = getattr(self, name)\n step._precache_reference_files(input_file)\n gc.collect()",
"def run_benchmark(env: Env, in_file):\n\n print('Running benchmarks in', in_file.name)\n # Run file_path through mlir_to_bef and bef_executor and extract the\n # benchmark result.\n return env.run_mlir(in_file.read())",
"def register_benchmarks(directory=None):\n dirs = places_to_look() if directory is None else [directory]\n for directory in dirs:\n with os.scandir(directory) as scan:\n for entry in scan:\n filename = entry.name\n if (\n filename.startswith(\".\")\n or not entry.is_file()\n or not filename.endswith(\".py\")\n ):\n continue\n if (\n filename.startswith(\"benchmark\")\n or filename.endswith(\"benchmark.py\")\n or filename.endswith(\"benchmarks.py\")\n ):\n import_path(f\"{directory}/{filename}\")",
"def read_mock_bgs_mxxl_brighttime(root_mock_dir='',mock_prefix='',\n mock_ext='hdf5',brickname_list=None):\n # Build iterator of all mock brick files\n iter_mock_files = desitarget.io.iter_files(root_mock_dir, mock_prefix, ext=mock_ext)\n \n # Might have different file types for this mock\n if mock_ext == 'hdf5':\n _load_mock_routine = _load_mock_bgs_mxxl_file_hdf5\n elif mock_ext == 'fits':\n _load_mock_routine = _load_mock_bgs_mxxl_file_fits\n else:\n raise Exception(\"No data read routine for mock file extension %s\"%(mock_ext))\n\n # Read each file\n print('Reading individual mock files')\n target_list = list()\n file_list = list()\n nfiles = 0\n for mock_file in iter_mock_files:\n nfiles += 1\n\n # Filter on bricknames\n if brickname_list is not None:\n brickname_of_target = desitarget.io.brickname_from_filename_with_prefix(mock_file,prefix=mock_prefix)\n if not brickname_of_target in brickname_list:\n continue\n \n # print(mock_file)\n data_this_file = _load_mock_routine(mock_file)\n target_list.append(data_this_file)\n file_list.append(mock_file)\n\n # Should have found some files\n assert(nfiles > 0)\n\n print('Found %d files, read %d after filtering'%(nfiles,len(target_list)))\n\n # Concatenate all the dictionaries into a single dictionary, in an order\n # determined by np.argsort applied to the base name of each path in\n # file_list.\n file_order = np.argsort([os.path.basename(x) for x in file_list])\n\n print('Combining mock files')\n full_data = dict()\n n_per_file = list()\n if len(target_list) > 0:\n for k in list(target_list[0]):\n print(' -- {}'.format(k))\n data_list_this_key = list()\n for itarget in file_order:\n data_list_this_key.append(target_list[itarget][k])\n full_data[k] = np.concatenate(data_list_this_key)\n\n # Add file and row number\n print('Adding file and row number')\n _read_mock_add_file_and_row_number(target_list,full_data)\n \n # Count number per file\n k = list(target_list[0])[0]\n n_per_file = [len(target_list[itarget][k]) for itarget in file_order]\n \n # Return source list as ordered list of (file, n_row) tuples\n sources = list()\n for ifile in file_order:\n sources.append((file_list[ifile],n_per_file[ifile]))\n\n return full_data, sources",
"def fre2mds(url, benchpath, cachepath='.cache/data/'):\n sr = 20480 # sample rate\n ws = 2048 # window size\n out = None\n try:\n # 1.download the dat file if necessary.\n retrieve_url_file(url, cachepath)\n\n # 2.read benchamark data files\n datumn = []\n mypath = benchpath\n onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n\n chunksize = 20480\n agelist = []\n for ii, item in enumerate(onlyfiles):\n fractionlet = 0 # some segment is too short and should be drop out.\n (de, fe) = utils.load_dat(item, mypath)\n # segment by chunksize(default 20480)\n for idx, subset in enumerate(range(0, len(de), chunksize)):\n seg = de[subset: subset + chunksize]\n if len(seg) > ws:\n datumn.append(seg)\n fractionlet += 1\n agelist.append(fractionlet) # each chunk contains idx+1 segments.\n agelist = [28, 12, 52] # FIXME: The benchmark datasets originally include three run to failure tests.\n objpos = len(datumn) # This position should be used to plot object sample.\n logging.info(f'1.Benchmark data read: {objpos}')\n\n # 3.read obj data files\n mypath = cachepath # Obj data from url, cached in local .cache directory.\n onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n\n for ii, item in enumerate(onlyfiles):\n fractionlet = 0 # some segment is too short and should be drop out.\n (de, fe) = utils.load_dat(item, mypath)\n # segment by chunksize(default 20480)\n for idx, subset in enumerate(range(0, len(de), chunksize)):\n seg = de[subset: subset + chunksize]\n if len(seg) > ws:\n datumn.append(seg)\n fractionlet += 1\n agelist.append(fractionlet) # each chunk contains idx+1 segments.\n logging.info(f'2.Total points(include obj ones): {len(datumn)}')\n # 4.fft and cluster\n frequencies, spectrum = cluster.ts2fft(datumn, sr, ws)\n clusternew_, dfnew = cluster.cluster_vectors(spectrum, False)\n df2 = mds.dev_age_compute(spectrum, frequencies, agelist) # should label at data reading phase.seg\n pos = mds.compute_mds_pos(spectrum)\n # set color for each points in df2\n df2.loc[:, 'color'] = '#000000'\n for idx, elems in enumerate(dfnew['vectors']):\n for el in elems:\n df2.loc[el, 'color'] = dfnew.loc[idx, 'color']\n logging.info(f'3.MDS pos computed.')\n # plot mds scatter chart\n plt.figure(1)\n plt.axes([0., 0., 1., 1.])\n # collections = list(range(len(pos)))\n # mask = dfnew[dfnew['cid'] == -1]['vectors'][0]\n # dispindices = list(set(collections).difference(set(mask)))\n dispindices = list(range(objpos)) # only plot benchmark points.\n plt.scatter(x=pos[dispindices, 0],\n y=pos[dispindices, 1],\n s=df2.loc[dispindices, 'age'],\n label=df2.loc[dispindices, 'color'],\n edgecolors=df2.loc[dispindices, 'color'],\n facecolors='none', marker='.', alpha=0.5, lw=1)\n hnds = []\n for idx, el in enumerate(dfnew['color']):\n pop = mpatches.Patch(color=el, label=f'C: [{dfnew[\"cid\"][idx]}], cnts:[{len(dfnew[\"vectors\"][idx])}]')\n hnds.append(pop)\n plt.legend(handles=hnds, prop={'size': 6})\n # label baseline points\n # baseline should be the first points\n # agelist = [24, 12, 24, 24, 6, ...]\n # dfnew:\n # cid, vectors\n # -1 [84,85,86,87, ...]\n # 0 [156,157, ...]\n # ...\n # 8 [0,1,2, ...]\n # should label points: 0, 24, 36, 60\n blcnts = 2\n baselineclass = [0]\n for idx, el in enumerate(agelist[0: blcnts]):\n ps = baselineclass[idx] + el\n baselineclass.append(ps)\n # find baselineclass leader points in dfnew and\n texts = []\n for obj in baselineclass: # 0, 24, 36, 60\n for idx, vecs in enumerate(dfnew['vectors']):\n if obj in vecs:\n txt = f'{obj} in C: 
[{dfnew[\"cid\"][idx]}]'\n texts.append(plt.text(pos[obj, 0], pos[obj, 1], txt))\n adjust_text(texts)\n # label object points\n plt.scatter(x=pos[objpos:, 0],\n y=pos[objpos:, 1],\n label=df2.loc[objpos:, 'color'],\n marker='*', s=300, color='k', alpha=0.5, lw=1)\n for txt in list(range(objpos, len(datumn))):\n plt.text(pos[txt, 0], pos[txt, 1], f'PT: {txt}', c='#000000')\n plt.show()\n logging.info('4.MDS plot finished.')\n df2.drop(df2.columns[list(range(len(df2.T) - 3))], axis=1, inplace=True)\n df2['pos_x'] = pos[:, 0]\n df2['pos_y'] = pos[:, 1]\n df2['shape'] = 0\n df2.loc[objpos:, 'shape'] = 1\n json.loads(df2.to_json())\n logging.info('5.Return to main procedure.')\n out = df2.to_json() # return a valid json string\n except requests.exceptions.ConnectionError as ce:\n logging.error(ce)\n\n return out",
"def load_files(self):\n # Needs to be implemented by child class\n raise NotImplementedError",
"def load_crawl():\n\n\tmodule_path = dirname(__file__)\n\twith open(sys.argv[1]) as csv_file:\n\t\tdata_file = csv.reader(csv_file)\n\t\ttemp = next(data_file)\n\t\tn_samples = int(temp[0])\n\t\tn_features = int(temp[1])\n\t\ttarget_names = np.array(temp[2:4])\n\t\tdata = np.empty((n_samples, n_features))\n\t\ttarget = np.empty((n_samples,), dtype=np.int)\n\n\t\tfor count, value in enumerate(data_file):\n\t\t\tif len(value)<28:\n\t\t\t\tcontinue\n\t\t\ttry:\n\t\t\t\tdata[count] = np.asarray(value[:28], dtype=np.float)\n\t\t\t\ttarget[count] = np.asarray(value[28], dtype=np.int)\t\n\t\t\texcept:\n\t\t\t\tcontinue\n\t\t\t#print \"data is \" + str(data[count])\n\t\t\t#print \"target is \" + str(target[count])\n\n\twith open(join(module_path, 'descr', 'crawl.rst')) as rst_file:\n\t\tfdescr = rst_file.read()\n\n\treturn Bunch(data=data, target=target,\n\t\t\t target_names=target_names,\n\t\t\t DESCR=fdescr,\n\t\t\t feature_names = ['evalCount', 'setInterval', 'setTimeout', 'link', \n\t\t\t\t\t\t\t 'search', 'exec','escape', 'unescape', 'ratio', \n\t\t\t\t\t\t\t 'emtropyAvg', 'entropyScript', 'longStrings', \n\t\t\t\t\t\t\t 'maxEntropy', 'stringAvg', 'maxLength', 'longVarFunc', \n\t\t\t\t\t\t\t 'stringAssignments', 'stringModFuncsCount', 'eventFuncsCount', \n\t\t\t\t\t\t\t 'domModFuncsCounter', 'suspStrings', 'whiteSpaceRatio', \n\t\t\t\t\t\t\t 'hexaStrings', 'maxNonPrintableCharactersinString', 'lineAvg', \n\t\t\t\t\t\t\t 'iframeCount', 'malTagCount', 'jsLength'])",
"def loadDirectory(self, dirname):\r\n cachelist=os.listdir(dirname)\r\n testlist=fnmatch.filter(cachelist,'*.hdf5')\r\n \r\n for file_ in testlist:\r\n print(\"Using {0}\".format(file_))\r\n \r\n files = [h5py.File(os.path.join(dirname, fn),'r') for fn in testlist]\r\n return files",
"def add_import_benchmark(name):\n relative_path = create_relative_path(\"../utils/main.swift\")\n\n # read current contents into an array\n file_contents = []\n with open(relative_path, \"r\") as f:\n file_contents = f.readlines()\n\n # the test dependencies are placed before all benchmarks, so we have to\n # insert the benchmark in the right alphabetical order after we have seen\n # all test dependencies.\n read_test_dependencies = False\n previous_benchmark_name = None\n file_new_contents = []\n for line in file_contents:\n # check if this line is a definition of a benchmark and get its name\n match = re.search(r\"import ([a-zA-Z]+)\", line)\n if match and match.group(1):\n benchmark_name = match.group(1)\n # find where to insert the new benchmark in the right alphabetical\n # order.\n if (\n name < benchmark_name\n and previous_benchmark_name is None\n or name < benchmark_name\n and name > previous_benchmark_name\n ):\n if read_test_dependencies:\n file_new_contents.append(\"import \" + name + \"\\n\" + line)\n else:\n # all test dependencies are first specified, so from now\n # on we can look where to insert the new benchmark.\n read_test_dependencies = True\n file_new_contents.append(line)\n else:\n file_new_contents.append(line)\n previous_benchmark_name = benchmark_name\n else:\n file_new_contents.append(line)\n with open(relative_path, \"w\") as f:\n for line in file_new_contents:\n f.write(line)",
"def load_from_benchmark_suite_dir(benchmark_suite_dir: str):\n\n suite_map: Dict[str, List[BenchmarkCase]] = collections.defaultdict(list)\n for benchmark_case_dir, _, _ in os.walk(benchmark_suite_dir):\n model_dir, benchmark_name = os.path.split(benchmark_case_dir)\n # Take the benchmark directory name and see if it matches the benchmark\n # naming convention:\n # <iree-driver>__<target-architecture>__<benchmark_mode>\n segments = benchmark_name.split(\"__\")\n if len(segments) != 3 or not segments[0].startswith(\"iree-\"):\n continue\n\n iree_driver, target_arch, bench_mode = segments\n bench_mode = bench_mode.split(\",\")\n\n # The path of model_dir is expected to be:\n # <benchmark_suite_dir>/<category>/<model_name>-<model_tags>\n category_dir, model_name_with_tags = os.path.split(model_dir)\n\n with open(os.path.join(benchmark_case_dir, MODEL_TOOLFILE_NAME),\n \"r\") as f:\n tool_name = f.read().strip()\n\n suite_map[category_dir].append(\n BenchmarkCase(model_name_with_tags=model_name_with_tags,\n bench_mode=bench_mode,\n target_arch=target_arch,\n driver=iree_driver,\n benchmark_case_dir=benchmark_case_dir,\n benchmark_tool_name=tool_name))\n\n return BenchmarkSuite(suite_map=suite_map)",
"def load_all(self, root_dir, file_list=None, pattern=None):\n # each file name corresponds to another date. Also tools (A, B) and others.\n\n # Select paths for training and evaluation\n if file_list is None:\n data_paths = glob.glob(os.path.join(root_dir, '*')) # list of all paths\n else:\n data_paths = [os.path.join(root_dir, p) for p in file_list]\n if len(data_paths) == 0:\n raise Exception('No files found using: {}'.format(os.path.join(root_dir, '*')))\n\n if pattern is None:\n # by default evaluate on\n selected_paths = data_paths\n else:\n selected_paths = list(filter(lambda x: re.search(pattern, x), data_paths))\n\n input_paths = [p for p in selected_paths if os.path.isfile(p) and p.endswith('.csv')]\n if len(input_paths) == 0:\n raise Exception(\"No .csv files found using pattern: '{}'\".format(pattern))\n\n if self.n_proc > 1:\n # Load in parallel\n _n_proc = min(self.n_proc, len(input_paths)) # no more than file_names needed here\n logger.info(\"Loading {} datasets files using {} parallel processes ...\".format(len(input_paths), _n_proc))\n with Pool(processes=_n_proc) as pool:\n all_df = pd.concat(pool.map(WeldData.load_single, input_paths))\n else: # read 1 file at a time\n all_df = pd.concat(WeldData.load_single(path) for path in input_paths)\n\n return all_df",
"def test_RawRun_process():\n for style in test_runs:\n test_runs[style].process()\n # now compare all images with ref\n ref = sorted(glob.glob('tests/data/processed_ref/*/*/*'))\n outputs = sorted(glob.glob('tests/data/processed/*/*/*'))\n for ref, out in zip(ref, outputs):\n print ref\n print out\n assert_image_equal(ref, out)",
"def copy_wpr_to_benchmark():\n local_path = 'data/wpr_source/*'\n remote_data_path = os.path.join(CHROMIUM_SRC, 'tools/perf/page_sets/data/')\n # Uses shutil.copy\n [copy(f, remote_data_path) for f in glob(local_path)]",
"def prepare_benchmark(trial_number, num_urls):\n telemetry_page_cycler_path = os.path.join(CHROMIUM_SRC,\n 'tools/perf/benchmarks/telemetryBenchmarks.py')\n\n # Dynamically writing python to a file\n class_template = (\"from measurements import page_cycler\\n\"\n \"import page_sets\\n\"\n \"from telemetry import benchmark\\n\\n\"\n \"class _PageCycler(benchmark.Benchmark):\\n\"\n \" options = {'pageset_repeat': 6}\\n\"\n \" @classmethod\\n\"\n \" def AddBenchmarkCommandLineArgs(cls, parser):\\n\"\n \" parser.add_option('--v8-object-stats',\\n\"\n \" action='store_true',\\n\"\n \" help='Enable detailed V8 object statistics.')\\n\"\n \" parser.add_option('--user-server-delay')\\n\"\n \" print 'IN BENCHMARK'\\n\"\n \" parser.add_option('--report-speed-index',\\n\"\n \" action='store_true',\\n\"\n \" help='Enable the speed index metric.')\\n\"\n \" parser.add_option('--cold-load-percent', type='int', \"\n \"default=50,\\n\"\n \" help='%d of page visits for which a cold load is \"\n \"forced')\\n\\n\"\n \" def CreatePageTest(self, options):\\n\"\n \" return page_cycler.PageCycler(\\n\"\n \" page_repeat = options.page_repeat,\\n\"\n \" pageset_repeat = options.pageset_repeat,\\n\"\n \" cold_load_percent = options.cold_load_percent,\\n\"\n \" record_v8_object_stats = options.v8_object_stats,\\n\"\n \" report_speed_index = options.report_speed_index)\\n\\n\")\n\n benchmark_template = (\"@benchmark.Enabled('android')\\n\"\n \"class PageCyclerUrl{0}(_PageCycler):\\n\"\n \" print 'Using page cycler'\\n\"\n \" page_set = page_sets.url{0}PageSet\\n\\n\")\n\n with open(telemetry_page_cycler_path, 'w') as f:\n f.write(class_template)\n for i in range(num_urls):\n f.write(benchmark_template.format(i))\n f.write(benchmark_template.format(str(i) + '_pc'))",
"def compute_mdsdata(benchpath, objpath):\n\n sr = 20480 # sample rate\n ws = 2048 # window size\n\n # 1.download the mat file if necessary.\n fn = objpath.rsplit('/', 1)[-1]\n cachepath = '.cache/data/'\n local = f'{cachepath}{fn}'\n if not os.path.exists(local):\n retrieve_url_file(objpath, local)\n\n # 2.read benchamark data files\n datumn = []\n mypath = benchpath\n onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n\n chunksize = 20480\n agelist = []\n for ii, item in enumerate(onlyfiles):\n fractionlet = 0 # some segment is too short and should be drop out.\n (de, fe) = utils.load_dat(item, mypath)\n # segment by chunksize(default 20480)\n for idx, subset in enumerate(range(0, len(de), chunksize)):\n seg = de[subset: subset + chunksize]\n if len(seg) > ws:\n datumn.append(seg)\n fractionlet += 1\n agelist.append(fractionlet) # each chunk contains idx+1 segments.\n agelist = [28, 12, 52] # FIXME: The benchmark datasets originally include three run to failure tests.\n objpos = len(datumn) # This position should be used to plot object sample.\n logging.info(f'Benchmark data read {objpos} cnts of points.')\n\n # 3.read obj data files\n mypath = cachepath # Obj data from url, cached in local .cache directory.\n onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n\n for ii, item in enumerate(onlyfiles):\n fractionlet = 0 # some segment is too short and should be drop out.\n (de, fe) = utils.load_dat(item, mypath)\n # segment by chunksize(default 20480)\n for idx, subset in enumerate(range(0, len(de), chunksize)):\n seg = de[subset: subset + chunksize]\n if len(seg) > ws:\n datumn.append(seg)\n fractionlet += 1\n agelist.append(fractionlet) # each chunk contains idx+1 segments.\n logging.info('Total points(include obj ones): ', len(datumn))\n\n # 4.fft and cluster\n\n frequencies, spectrum = cluster.ts2fft(datumn, sr, ws)\n clusternew_, dfnew = cluster.cluster_vectors(spectrum, False)\n\n df2 = mds.dev_age_compute(spectrum, frequencies, agelist) # should label at data reading phase.seg\n pos = mds.compute_mds_pos(spectrum)\n\n logging.info(f'MDS pos computed.')\n\n # set color for each points in df2\n df2.loc[:, 'color'] = '#000000'\n for idx, elems in enumerate(dfnew['vectors']):\n for el in elems:\n df2.loc[el, 'color'] = dfnew.loc[idx, 'color']\n\n # plot mds scatter chart\n plt.figure(1)\n plt.axes([0., 0., 1., 1.])\n logging.info(f'Total {len(agelist)} cnts samples loaded.')\n # collections = list(range(len(pos)))\n # mask = dfnew[dfnew['cid'] == -1]['vectors'][0]\n # dispindices = list(set(collections).difference(set(mask)))\n dispindices = list(range(objpos)) # only plot benchmark points.\n plt.scatter(x=pos[dispindices, 0],\n y=pos[dispindices, 1],\n s=df2.loc[dispindices, 'age'],\n label=df2.loc[dispindices, 'color'],\n edgecolors=df2.loc[dispindices, 'color'],\n facecolors='none', marker='.', alpha=0.5, lw=1)\n hnds = []\n for idx, el in enumerate(dfnew['color']):\n pop = mpatches.Patch(color=el, label=f'C: [{dfnew[\"cid\"][idx]}], cnts:[{len(dfnew[\"vectors\"][idx])}]')\n hnds.append(pop)\n plt.legend(handles=hnds, prop={'size': 6})\n # label baseline points\n # baseline should be the first points\n # agelist = [24, 12, 24, 24, 6, ...]\n # dfnew:\n # cid, vectors\n # -1 [84,85,86,87, ...]\n # 0 [156,157, ...]\n # ...\n # 8 [0,1,2, ...]\n # should label points: 0, 24, 36, 60\n blcnts = 2\n baselineclass = [0]\n for idx, el in enumerate(agelist[0: blcnts]):\n ps = baselineclass[idx] + el\n baselineclass.append(ps)\n # find baselineclass leader 
points in dfnew and\n texts = []\n for obj in baselineclass: # 0, 24, 36, 60\n for idx, vecs in enumerate(dfnew['vectors']):\n if obj in vecs:\n txt = f'{obj} in C: [{dfnew[\"cid\"][idx]}]'\n texts.append(plt.text(pos[obj, 0], pos[obj, 1], txt))\n adjust_text(texts)\n\n plt.scatter(x=pos[objpos:, 0],\n y=pos[objpos:, 1],\n label=df2.loc[objpos:, 'color'],\n marker='*', s=300, color='k', alpha=0.5, lw=1)\n\n for txt in list(range(objpos, len(datumn))):\n plt.text(pos[txt, 0], pos[txt, 1], f'Object {txt}', c='#000000')\n\n plt.show()\n logging.info('---over---')\n\n return 1",
"def __init__(self, files, folder, storage_method=\"raw\", force_shorten=True,\n data_dir=\"./data_og_consecutive\", tokenizer_path=\"./\"):\n Doc.__init__(self, storage_method, force_shorten, data_dir, tokenizer_path)\n self.all_docs = []\n\n for f in tqdm(files):\n doc = {}\n with open(os.path.join(folder, f)) as fp:\n tos = json.load(fp)\n for section in tos:\n # Transform dict into X/y sample\n text = section[\"Text\"]\n label = section[\"Section\"]\n doc = self.add_to_section(text, label, doc)\n\n self.all_docs.append(doc)",
"def read_data_samples(fp):\n if(path.isdir(fp)):\n fps = glob.glob(fp + '\\\\*.txt')\n return list(map(lambda x: read_file(x), fps))",
"def get_benchmark_file_path(cls, results_dir):\n return os.path.join(results_dir, \"benchmarks.json\")"
] | [
"0.57945746",
"0.573893",
"0.56740427",
"0.56193376",
"0.54738635",
"0.54364634",
"0.5314725",
"0.5297583",
"0.52758354",
"0.52714825",
"0.5262658",
"0.52590454",
"0.5200991",
"0.5175383",
"0.51303047",
"0.5125099",
"0.5123298",
"0.51226366",
"0.5115385",
"0.5082754",
"0.5074112",
"0.5073529",
"0.50584865",
"0.50492233",
"0.50442845",
"0.5029491",
"0.50235623",
"0.50226617",
"0.5012935",
"0.5011063"
] | 0.66932875 | 0 |
Creates a filename for the diff image. | def __diff_filename(self):
diff_dir = os.path.join(self.__folder, Reference.DIFF_OUT)
if not os.path.exists(diff_dir):
os.makedirs(diff_dir)
return os.path.join(diff_dir, self.__name +'.jpg') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generate_image_filename():\n now = datetime.now().strftime('%a-%w-%b-%H:%M:%S')\n return 'CCTV_{0}.jpg'.format(now)",
"def create_file_name(self):\n # create a unique id for the file name\n index = self.helpers.alpha_uuid()\n\n filename = self.form['FieldStorage'][self.image_cid].filename\n extension = guess_extension(guess_type(filename)[0])\n return ( # concatenates the following data\n self.articleData.get('directory') + # directory\n '/' + # slash\n self.articleData.get('article_name') + # the article name\n '-' + # hyphen character\n index + # the id of the image\n extension\n )",
"def generate_file_filename(instance, filename):\n return _generate_filename(instance, filename, 'photos')",
"def create_filename (self):\n\t\tassert self.__patient_name and self.__location_name, \"New filename could not be determined, one or more needed arguments is empty!\"\n\t\t_patient_name = self.__patient_name.split(' ')\n\t\t_patient_name.reverse()\n\t\t\n\t\treturn os.path.join(os.path.dirname(self.file._path), \"%s MR %s%s\" % (self.__location_name, ', '.join(_patient_name).upper(), self._file.extension))",
"def format_image_filename(device_image):\n return \"{}-{}-{}-{}.bit\".format(device_image.bitstream_type,\n device_image.pci_vendor,\n device_image.pci_device,\n device_image.uuid)",
"def _image_filename(image_name):\n return '{}.tar'.format(image_name.replace(':', '_').replace('/', '_'))",
"def _generate_overlay_file_name(self, well, channel, desc):\n \n return \"c\" + channel + \"_w\" + well + \"_\" + desc + \".png\"",
"def _gen_image_filename(instance, filename):\n # First, store the original filename in the model\n instance.original_filename = filename\n\n return _unique_path(instance.owner.pk, filename)",
"def generate_image_name(self, image):\n return image.replace('shub://', '').replace('/', '-') + '.simg'",
"def generate_file_name(well, channel, desc):\n \n return \"bPLATE_w\" + well + \"_\" + desc + \"_c\" + channel + \".png\"",
"def filename_generate(image_class, size=12, chars=string.ascii_uppercase + string.ascii_lowercase + string.digits):\n\tnew_filename = time.strftime(\"%d-%m-%Y_\")\n\tnew_filename = new_filename + ''.join(random.choice(chars) for _ in range(size))\n\tnew_filename = new_filename + \"_P\" + str(image_class)\n\treturn new_filename",
"def make_img_name(file_ext='.png'):\r\n fn = []\r\n # format seqs and write out to temp file\r\n for i in range(0, 30):\r\n fn.append(choice(ALPHABET))\r\n return ''.join(fn) + file_ext",
"def makefilename(self):\n fp= (pathlib.Path(self.vr_folder).expanduser()/(time.strftime(self.vr_filename))).with_suffix('')\n fp.parent.mkdir(parents=True, exist_ok=True)\n print('files setup', str(fp))\n return fp",
"def generate_file_name(self):\n self._session_iterator = None # New file invalidate old interator\n self._img_count += 1\n self._current_file = '{0}/frame_{1}.jpg'.format(self._relative_path,self._img_count)\n return self.current_file",
"def generate_filename(\n radar, field, sweep, ext=\"png\", datetime_format=\"%Y%m%d%H%M%S\", use_sweep_time=False\n):\n name_s = generate_radar_name(radar).replace(\" \", \"_\")\n field_s = field.replace(\" \", \"_\")\n if use_sweep_time:\n time_s = generate_radar_time_sweep(radar, sweep).strftime(datetime_format)\n else:\n time_s = generate_radar_time_begin(radar).strftime(datetime_format)\n sweep_s = str(sweep).zfill(2)\n return f\"{name_s}_{field_s}_{sweep_s}_{time_s}.{ext}\"",
"def path_to_name(img):\n\n return os.path.dirname(img) + '_' + os.path.basename(img)",
"def _generate_filename(instance, filename, prefix):\n md5 = hashlib.md5()\n md5.update(struct.pack('f', time.time()))\n for chunk in instance.file.chunks():\n md5.update(chunk)\n extension = os.path.splitext(filename)[1]\n return os.path.join(prefix, md5.hexdigest() + extension)",
"def generate_filename(\n self, directory=os.getcwd(), prefix=\"tile\", format=\"png\", path=True\n ):\n filename = prefix + \"_{col:02d}_{row:02d}.{ext}\".format(\n col=self.column, row=self.row, ext=format.lower().replace(\"jpeg\", \"jpg\")\n )\n if not path:\n return filename\n return os.path.join(directory, filename)",
"def _output_path(name):\n output = Path(\"../Analysis Results/\").joinpath(str(date.today()))\n output.mkdir(parents=True, exist_ok=True)\n return output.joinpath(f\"{name}.png\")",
"def get_image_filename(self, filename):\n path = 'images/{folder}/{filename}'.format(\n folder=self.folder,\n filename=filename\n )\n return path",
"def image_file_name(instance, filename):\n\text = filename[-4:]\n\tnew_filename = os.path.join('images',str(instance.image_folder),str(instance.user).replace(\" \",\"\").lower()+ext)\n\treturn new_filename",
"def generateFileName(self):\n return 'Covid' + self.map_type + '.html'",
"def generate_glider_filename(description):\n filename = (\n \"{glider}-{year:d}-{day:03d}-{mission:d}-{segment}.{type}\".format(**description)\n )\n return os.path.join(description['path'], filename)",
"def outfile_name(cmd):\n return md5.md5(cmd).hexdigest()[:8]",
"def get_filename(checksum):\n return '%s.svg' % checksum",
"def _create_snapshot_name(self):\n\n return ('cinder-zfssa-nfs-snapshot-%s' %\n dt.datetime.utcnow().isoformat())",
"def file_name(id, title, kind=\"src\"):\n fn_template = conf.template_source_file_name\n if kind == \"tst\":\n fn_template = conf.template_test_file_name\n\n return fn_template.format(id=id, title=title.replace(\"-\", \"_\"))",
"def output_file_name_maker(args):\n log.debug(\"Entering output_file_name_maker()\")\n path = os.getcwd() + '/out_files/'\n if not os.path.isdir(path):\n os.mkdir(path)\n\n if args.output is None:\n out_file_name = path + args.input[:-4] + '_' + args.type + '_' + args.layer\n else:\n out_file_name = path + args.output\n\n log.debug(\"Exiting output_file_name_maker()\")\n return out_file_name",
"def GenerateImageName(cls, build_target=None, build_id=None):\n if not build_target and not build_id:\n return \"image-\" + uuid.uuid4().hex\n name = cls.IMAGE_NAME_FMT.format(\n build_target=build_target,\n build_id=build_id,\n uuid=uuid.uuid4().hex[:8])\n return cls._FormalizeName(name)",
"def generate_filename(filename: str) -> str:\n return f\"{str(uuid.uuid4())}.{get_extension(filename)}\""
] | [
"0.7496504",
"0.70392674",
"0.6811171",
"0.68030304",
"0.6780318",
"0.6759815",
"0.6705021",
"0.6657758",
"0.66381377",
"0.663154",
"0.6581436",
"0.656019",
"0.6552397",
"0.64273417",
"0.64220864",
"0.64080304",
"0.6396402",
"0.6372434",
"0.6313289",
"0.6312673",
"0.62726647",
"0.6269337",
"0.62687016",
"0.62545156",
"0.6247264",
"0.6192167",
"0.6191702",
"0.6174252",
"0.61287576",
"0.6125413"
] | 0.80227315 | 0 |
Returns complete path to reference file. | def reffile(self):
return os.path.join(self.__folder, self.__name + '.ref') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def file_reference(self):\n return self.__file_reference",
"def file_path(self):\n return self.lib.file_path",
"def reference(self, referencing_buildfile_path=None):\r\n dirname = os.path.dirname(self.buildfile.relpath)\r\n if referencing_buildfile_path and dirname == os.path.dirname(referencing_buildfile_path):\r\n return ':%s' % self.target_name\r\n elif os.path.basename(dirname) != self.target_name:\r\n return '%s:%s' % (dirname, self.target_name)\r\n else:\r\n return dirname",
"def _ref_path(self, name: str) -> str:\n assert name.startswith(\"refs/\")\n return posixpath.join(self._path, name)",
"def get_path(self):\n try:\n return self._file.path\n except AttributeError:\n return os.path.abspath(self._file.name)",
"def abspath(self, ref):\n \n directory, path = get_location(self.directory, ref.strip(),\n current=dirname(self.relative))\n path = join_fb_root(join(directory, path))\n return path",
"def path(self):\n return self.file_path()",
"def filepath(self):\n return self._filepath.path",
"def get_reference_street_path() -> str:\n Config.__get()\n assert Config.__config is not None\n relpath = Config.__config.get(\"wsgi\", \"reference_street\").strip()\n return get_abspath(relpath)",
"def get_file_path(self):\n return self._file_path",
"def full_path(self):\n return os.path.abspath(self.path)",
"def fpath(self):\n return os.path.join(self.path, self.name)",
"def filepath(self):\n return self.file.path",
"def get_path(self):\n\n if not self.path:\n Settings.err_print(\"missing file path\")\n return \"\"\n return self.path",
"def _GetRelPath(self, filename):\r\n absname = os.path.join(self.repo_dir, filename)\r\n return os.path.relpath(absname)",
"def get_current_file_uri(self): # real signature unknown; restored from __doc__\n return \"\"",
"def rel_path(self) -> str:\n return os.path.dirname(self._filepath_oracle.filepath())",
"def rel_path(self):\n return \"{}/{}\".format(Path(self.dir_path).basename, self.index_file)",
"def getCurrentFilePath(self):\n return os.path.abspath(self.filePath)",
"def file_path() -> str:\n stack_t = inspect.stack()\n ins = inspect.getframeinfo(stack_t[1][0])\n return os.path.abspath(ins.filename)",
"def get_absolute_path(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetAbsolutePath', self.handle)",
"def file_location(self, file: str) -> str:\n return os.path.join(str(self.id), file)",
"def file_path(self) -> global___Expression:",
"def getAbsolutePath(relPath):\n currDir = os.path.dirname(__file__)\n return os.path.join(currDir, relPath)",
"def path(self) -> str:\n return self.src + \"/\"",
"def get_file_path(self):\n if self.file_path is None:\n return None\n if self.file_path.endswith('.pyc'):\n return self.file_path[:-1]\n return self.file_path",
"def file_path(self) -> Path:\n return self._input_file",
"def full_path(self) -> str:\n return self.workspace.get_full_path(self)",
"def get_full_path(self):\n try:\n full_path = os.path.abspath(self.FILENAME)\n return full_path\n except Exception as e:\n raise SystemExit(f\"Could not complete operation: {e}\")",
"def get_document_path(self):\n return pathlib.Path(urlparse(self.get_document().uri).path)"
] | [
"0.76702243",
"0.7521929",
"0.7433961",
"0.7375567",
"0.73540425",
"0.7257237",
"0.7222851",
"0.7154171",
"0.7140519",
"0.70815873",
"0.707913",
"0.69877857",
"0.6966572",
"0.69569427",
"0.6941436",
"0.6937595",
"0.69299084",
"0.6917151",
"0.69130236",
"0.69052565",
"0.6896229",
"0.6880144",
"0.6858994",
"0.68562573",
"0.6850354",
"0.68220425",
"0.6799827",
"0.67980224",
"0.67810816",
"0.67566985"
] | 0.7571063 | 1 |
Loads the positions where bugs were found. | def __load_bugs(self):
bugs = []
with open(self.reffile(), 'rb') as reffile:
reader = csv.reader(reffile, delimiter=';', quotechar='\n')
for line in reader:
bugs.append(tuple(map(int, line)))
return bugs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getSimbadPositions(identifier):\n\treturn base.caches.getSesame(\"simbad\").getPositionFor(identifier)",
"def find_excited_locations(self):\n return np.asarray(np.where(self._grid == 8)).T",
"def _find_pickup_locations(self):\n if self._pickup_locations is None:\n maze_str = self._env.observations()['DEBUG.MAZE.LAYOUT'].strip()\n lines = maze_str.split('\\n')\n\n self._pickup_locations = []\n for j, line in enumerate(lines):\n for i, cell in enumerate(line):\n if cell == _PICKUP_SYMBOL:\n self._pickup_locations.append((i, j))\n return self._pickup_locations",
"def load_places():\n with open(\"resources/map.txt\", \"r\") as file:\n rows = file.readlines()\n x_max = len(rows[0].splt(\"\\t\")) # Assumes all rows contain the same number of tabs\n for y in range(len(rows)):\n cols = rows[y].splt(\"\\t\")\n for x in range(x_max):\n place_name = cols[x].replace(\"\\n\",\"\")\n if place_name == \"StartingRoom\":\n global starting_position\n starting_position = (x, y)\n if place_name == \"\":\n _world[(x, y)] = None #create a key to a dict, doesn't if cell is empty\n else:\n getattr(__import__(\"places\"), place_name)(x, y)\n \"\"\"reflect into places module, find class whose name matches place_name and\n passes the coordinates (x, y) to the constructor of the places\"\"\"\n\n \"\"\"alternative : tile_map = [[FindGoldRoom(),GoblinRoom(),None,None,None],\n [None,StartingRoom(),EmptyCave(),EmptyCave(),None]] \"\"\"",
"def find_offsets(self):\r\n\r\n #### Begin functionality here\r\n\r\n return()",
"def test_findBugfixes(self):\n bugfixes = self.builder._findChanges(\n self.project, self.builder._BUGFIX)\n self.assertEquals(\n bugfixes,\n [(23, 'Broken stuff was fixed.')])",
"def loadTiles():\n with open('resources/map.txt', 'r') as f:\n rows = f.readlines()\n global numCols\n numCols = len(rows[0].split('\\t')) # Assumes all rows contain the same number of tabs\n global numRows\n numRows = len(rows)\n for y in range(numRows):\n cols = rows[y].split('\\t')\n for x in range(numCols):\n tileName = cols[x].replace('\\n', '')\n if tileName == \"StartingRoom\":\n global currentPosition\n currentPosition = [x, y]\n _world[(x, y)] = None if tileName == '' else getattr(__import__('tiles'), tileName) (x, y)",
"def open_spots(self):\n ret = []\n for i in range(1,25):\n if self.nodes[i].piece == None:\n ret.append(i)\n return ret",
"def available_positions(self):\n if len([x for x in self.grid.values() if x[0] != None]) < 13:\n return [x for x in assignable_positions if self.grid[x][1] == \"---\"]\n else:\n return []",
"def extract_fixed_point_locations(fps):\n fixed_point_location = [fp['x'] for fp in fps]\n\n fixed_point_locations = np.vstack(fixed_point_location)\n\n return fixed_point_locations",
"def preload_all_problems(self):\n for _, _, filenames in os.walk(self.problemDir):\n for filename in filenames:\n if filename[-3:] == \".py\" and filename != \"__init__.py\":\n self.load_problem_file(filename[0:-3])",
"def getting_flags_locations(self):\n print(self.flags)\n self.line_finder.find_line(self.html)",
"def _load(self):\n if ((self._selection_rate + self._random_selection_rate) / 2) * self._nb_children != 1:\n raise Exception(\"Either the selection rate, random selection rate or the number of children is not \"\n \"well adapted to fit the population\")\n\n values_to_set = fileloader.load_file_as_values(self._model_to_solve)\n zeros_to_count = '0' if len(values_to_set) < 82 else '00'\n print(\"The solution we have to solve is: (nb values to find = {})\".format(values_to_set.count(zeros_to_count)))\n\n self._start_time = time()\n s = Sudoku(values_to_set)\n s.display()\n\n self._run_pencil_mark(s)\n return s",
"def add_loc(self):\n self.loc = 0\n for t in self.thys:\n with open(t, 'r') as f:\n for l in f:\n if l.strip():\n self.loc += 1",
"def get_valid_locations(self, board):\n valid_locations = []\n for col in range(self._COLUMNCOUNT):\n try:\n if validate_column(board, col):\n valid_locations.append(col)\n except InvalidColumn:\n pass\n return valid_locations",
"def _parse(self):\n with open(self._path, 'r') as file:\n try:\n line = file.readline()\n while line:\n if line.startswith(BUG_START):\n line = file.readline()\n if line:\n # Extract bug type\n bug_type = line.split(' ', 1)[0]\n if bug_type not in self._bug_list:\n self._bug_list[bug_type] = []\n # Get whether or not the bug was reproduced\n reproduced = 'Bug was reproduced' in line\n line = file.readline()\n if line.startswith('Attempted'):\n # Skip the 'Attempted to reproduce' line if exists\n line = file.readline()\n bug_hash = line.split(' ')[-1].rstrip()\n line = file.readline()\n seq = ParsedSequence([])\n # Populate the sequence of requests that made the bug\n while line and not line.startswith(BUG_START):\n seq += self._get_request(line)\n line = file.readline()\n # Add the bug sequence to the bug list\n self._bug_list[bug_type].append((seq, reproduced, bug_hash))\n else:\n line = file.readline()\n except Exception as err:\n print(\"Failed to read bug log. Log was not a complete test log.\\n\"\n f\"{err!s}\")\n raise TestFailedException",
"def make_positions(\n night, runs, observatory, instrument, hlog, targets,\n skip_targets, tdata, posdata, load_old, retry,\n full, rname, smessages, fmessages, p2positions, okwrite\n):\n\n pdata = {}\n\n if load_old and os.path.exists(posdata):\n # pre-existing file found\n with open(posdata) as pin:\n for line in pin:\n arr = line.split()\n if len(arr) != 20:\n raise ValueError(\n f'Line = \"{line.strip()}\" from {posdata} had {len(arr)}!=20 items'\n )\n arr[3] = arr[3].replace('~',' ')\n pdata[arr[0]] = [\n '' if val == 'UNDEF' else val for val in arr[1:]\n ]\n print('Read position data from',posdata)\n\n if not retry:\n return pdata\n\n with open(posdata if okwrite else os.devnull,'w') as pout:\n for run in runs:\n\n if len(tdata[run]) == 1:\n # means its a power on/off\n continue\n\n if run in pdata and pdata[run][0] != '':\n # Already have positional data which we will\n # not re-do, so just write out to disk\n arr = ['UNDEF' if val == '' else val for val in pdata[run]]\n arr[2] = arr[2].replace(' ','~')\n pout.write(\n f\"{run} {arr[0]} {arr[1]} {arr[2]} {arr[3]} {arr[4]} \" +\n f\"{arr[5]} {arr[6]} {arr[7]} {arr[8]} {arr[9]} {arr[10]} \" +\n f\"{arr[11]} {arr[12]} {arr[13]} {arr[14]} {arr[15]} \" +\n f\"{arr[16]} {arr[17]} {arr[18]}\\n\"\n )\n continue\n\n recomp = True\n\n # Now going to try to work stuff out\n\n if full:\n print(f'Analysing positions for run {run}')\n\n # open the run file as an Rhead\n runname = os.path.join(night, run)\n try:\n if instrument == 'HiPERCAM':\n rhead = hcam.hcam.Rhead(runname)\n else:\n rhead = hcam.ucam.Rhead(runname)\n except:\n if full:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_tb(exc_traceback, limit=1)\n traceback.print_exc()\n print(f\"Failed to open {runname} as an Rhead\")\n continue\n\n # object name\n if hlog.format == 1:\n target = hlog.target[run]\n elif instrument == 'HiPERCAM':\n target = rhead.header.get(\"OBJECT\",'')\n else:\n target = rhead.header.get(\"TARGET\",'')\n target = target.strip().replace('~',' ')\n\n # RA, Dec lookup\n if target == '' or target in skip_targets:\n # don't even try\n autoid, ra, dec = 'UNDEF', 'UNDEF', 'UNDEF'\n recomp = False\n else:\n try:\n # See if we already have the info stored\n autoid, ra, dec = targets(target)\n except:\n # apparently we don't ...\n try:\n # attempt simbad lookup here\n autoid, ra, dec = target_lookup(target)\n targets.add_target(target, ra, dec, autoid)\n print(f' Added {target} to targets')\n pos = SkyCoord(f'{ra} {dec}',unit=(u.hourangle, u.deg))\n\n # save successful SIMBAD-based lookup\n smessages.append(\n f\"{autoid.replace(' ','~'):32s} \" +\n f\"{pos.to_string('hmsdms',sep=':',precision=2)} \" +\n f\"{target.replace(' ','~')}\"\n )\n\n except:\n if target in p2positions:\n # data loaded at the phase II stage -- last resort\n ra, dec = p2positions[target]\n print(f' Found {target} in phaseII data at RA={ra}, Dec={dec}')\n pos = SkyCoord(f'{ra} {dec}',unit=(u.hourangle, u.deg))\n targets.add_target(target, pos.ra.hour, pos.dec.value, target)\n autoid, ra, dec = targets(target)\n\n # save successful lookups\n smessages.append(\n f\"{target.replace(' ','~'):32s} \" +\n f\"{pos.to_string('hmsdms',sep=':',precision=2)} \" +\n f\"{target.replace(' ','~')}\"\n )\n\n else:\n # nothing worked\n print(\n f' No position found for {runname}, target = \"{target}\"'\n )\n autoid, ra, dec = 'UNDEF', 'UNDEF', 'UNDEF'\n skip_targets.append(target)\n\n # save in suitable format for adding to FAILED_TARGETS if wanted.\n fmessages.append(\n 
f\"{target.replace(' ','~'):32s} {rname} {night} {run}\"\n )\n recomp = False\n\n if not recomp and run in pdata:\n # can save a stack of time by not recomputing any Sun / Moon stuff\n arr = ['UNDEF' if val == '' else val for val in pdata[run]]\n arr[2] = arr[2].replace(' ','~')\n pout.write(\n f\"{run} {arr[0]} {arr[1]} {arr[2]} {arr[3]} {arr[4]} \" +\n f\"{arr[5]} {arr[6]} {arr[7]} {arr[8]} {arr[9]} {arr[10]} \" +\n f\"{arr[11]} {arr[12]} {arr[13]} {arr[14]} {arr[15]} \" +\n f\"{arr[16]} {arr[17]} {arr[18]}\\n\"\n )\n continue\n\n # start accumulating stuff to write out\n arr = [ra, dec, autoid]\n\n if ra == 'UNDEF' and dec == 'UNDEF' and instrument == 'ULTRASPEC':\n # for altitude / Sun / Moon stuff, telescope position\n # is good enough, so this is one final go at getting a\n # usable position.\n hd = rhead.header\n\n ra = hd.get(\"RA\", \"UNDEF\")\n dec = hd.get(\"Dec\", \"UNDEF\")\n if ra != 'UNDEF' and dec != 'UNDEF':\n try:\n ra, dec, syst = str2radec(ra + ' ' + dec)\n except:\n pass\n\n # time-dependent info\n ut_start, mjd_start, ut_end, mjd_end, cadence, \\\n expose, nok, ntotal = tdata[run]\n\n try:\n\n mjd_start = float(mjd_start)\n mjd_end = float(mjd_end)\n tstart = Time(mjd_start, format='mjd')\n tmid = Time((mjd_start+mjd_end)/2, format='mjd')\n tend = Time(mjd_end, format='mjd')\n\n # Scale Sun-Moon angle at mid time (0 = New Moon, 1 =\n # Full)\n sun_mid = get_sun(tmid)\n moon_mid = get_moon(tmid)\n sun_moon = sun_mid.separation(moon_mid).degree / 180\n\n if ra != 'UNDEF' and dec != 'UNDEF':\n\n # Calculate the Alt, Az at start, middle, end\n frames = AltAz(obstime=[tstart,tmid,tend], location=observatory)\n pos = SkyCoord(f'{ra} {dec}',unit=(u.hourangle, u.deg))\n points = pos.transform_to(frames)\n alts = [round(alt,1) for alt in points.alt.degree]\n azs = [round(az,1) for az in points.az.degree]\n arr += alts + azs\n\n # Calculate range of airmasses\n seczs = np.array([float(secz) for secz in points.secz])\n secz_min, secz_max = seczs.min(), seczs.max()\n\n # Need to check for meridian crossing, and if it happens\n # we need to close in on it\n sinas = [np.sin(az) for az in points.az]\n if sinas[0] > 0 and sinas[2] < 0:\n s1, s2 = sinas[0], sinas[2]\n t1, t2 = tstart, tend\n if sinas[1] > 0:\n s1 = sinas[1]\n t1 = tmid\n else:\n s2 = sinas[1]\n t2 = tmid\n while s1 - s2 > 0.0005:\n tguess = t1 + s1/(s1-s2)*(t2-t1)\n frame = AltAz(obstime=tguess, location=observatory)\n point = pos.transform_to(frame)\n sina = np.sin(point.az)\n if sina > 0:\n s1 = sina\n t1 = tguess\n else:\n s2 = sina\n t2 = tguess\n secz_min = float(point.secz)\n\n dsecz = round(secz_max-secz_min,2)\n arr += [round(secz_min,2), round(secz_max,2), dsecz]\n\n # Now calculate the angular distance from the Sun\n # and Moon at the mid-time\n sun_mid_trans = sun_mid.transform_to(frames[1])\n moon_mid_trans = moon_mid.transform_to(frames[1])\n point_mid = points[1]\n sun_dist = point_mid.separation(sun_mid_trans).degree\n moon_dist = point_mid.separation(moon_mid_trans).degree\n arr += [round(sun_dist,1),round(moon_dist,1)]\n\n else:\n arr = arr[:3] + 11*['UNDEF']\n\n # Now some data on the altitude of the Sun & Moon\n frame = AltAz(obstime=tstart, location=observatory)\n sun_start = get_sun(tstart).transform_to(frame)\n moon_start = get_moon(tstart).transform_to(frame)\n\n # end\n frame = AltAz(obstime=tend, location=observatory)\n sun_end = get_sun(tend).transform_to(frame)\n moon_end = get_moon(tend).transform_to(frame)\n\n arr += [\n round(sun_start.alt.degree,1), round(sun_end.alt.degree,1),\n 
round(moon_start.alt.degree,1), round(moon_end.alt.degree,1),\n round(sun_moon,3),\n ]\n\n except:\n if full:\n print(f\"Problem on run = {run}\")\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_tb(\n exc_traceback, limit=1, file=sys.stdout\n )\n traceback.print_exc(file=sys.stdout)\n\n # write out info\n arr = arr[:3] + 16*['UNDEF']\n\n arr[2] = arr[2].replace(' ','~')\n pout.write(\n f\"{run} {arr[0]} {arr[1]} {arr[2]} {arr[3]} {arr[4]} \" +\n f\"{arr[5]} {arr[6]} {arr[7]} {arr[8]} {arr[9]} {arr[10]} \" +\n f\"{arr[11]} {arr[12]} {arr[13]} {arr[14]} {arr[15]} \" +\n f\"{arr[16]} {arr[17]} {arr[18]}\\n\"\n )\n\n arr[2] = arr[2].replace('~',' ')\n pdata[run] = [\n '' if val == 'UNDEF' else val for val in arr\n ]\n\n if okwrite:\n print('Written positional data to',posdata)\n\n return pdata",
"def _load_breaks(self):\n for (filename, lineno) in Breakpoint.bplist.keys():\n self._add_to_breaks(filename, lineno)",
"def loc():\n file_types = (\n ['Python', 'py', '#']\n )\n\n click.echo('Lines of code\\n-------------')\n\n click.echo(\"{0}: {1}\".format(file_types[0], count_locs(file_types[1],\n file_types[2])))\n\n return None",
"def holes(self, test_pylos):\n availableholes = []\n for layer in range(1, 4):\n for row in range(4 - layer):\n for column in range(4 - layer):\n value = test_pylos.get(layer, row, column)\n if value is None:\n try:\n test_pylos.validPosition(layer, row, column)\n hole = [layer, row, column]\n except game.InvalidMoveException:\n pass\n else:\n availableholes.append(hole)\n return availableholes",
"def reload_positions(self):\n self._initial_position = load_pose_from_npz(self._target_filename,\n self._actuator_name, str(self._target_number), 'initial')\n self._target_position = load_pose_from_npz(self._target_filename,\n self._actuator_name, str(self._target_number), 'target')\n self._initial_image = load_data_from_npz(self._target_filename,\n self._actuator_name, str(self._target_number), 'initial',\n 'image', default=None)\n self._target_image = load_data_from_npz(self._target_filename,\n self._actuator_name, str(self._target_number), 'target',\n 'image', default=None)",
"def checkIfRightPlace(self):\n print(\"Checking location\")\n\n notFoundMSB = 0\n notFoundLUABND = 0\n notFoundFFXBND = 0\n notFoundEmevd = 0\n gameParamExists = False\n\n exeStatus = check_exe.check_exe_checksum()\n check_for_dcx = False\n if (exeStatus == \"Remaster\"):\n check_for_dcx = True\n\n for iFile in self.inputFilesAll:\n if not (os.path.isfile(self.MAPSTUDIO + iFile + '.msb')):\n notFoundMSB += 1\n\n if not (iFile == \"m12_00_00_01\"):\n if (check_for_dcx):\n if not (os.path.isfile(self.AISCRIPTS + iFile + '.luabnd.dcx')):\n notFoundLUABND += 1\n else:\n if not (os.path.isfile(self.AISCRIPTS + iFile + '.luabnd')):\n notFoundLUABND += 1\n\n if (check_for_dcx):\n if not (os.path.isfile(self.EMEVDS + iFile + '.emevd.dcx')):\n notFoundEmevd += 1\n else:\n if not (os.path.isfile(self.EMEVDS + iFile + '.emevd')):\n notFoundEmevd += 1\n \n for iFile in self.inputFFXFiles:\n if (iFile != \"NONE\"):\n if (check_for_dcx):\n if not (os.path.isfile(self.FFX_DIR_REMASTERED.format(iFile))):\n notFoundFFXBND += 1\n else:\n if not (os.path.isfile(self.FFX_DIR.format(iFile))):\n notFoundFFXBND += 1\n\n if (check_for_dcx):\n gameParamExists = os.path.isfile(self.GAMEPARAM_PATH_REMASTERED)\n else:\n gameParamExists = os.path.isfile(self.GAMEPARAM_PATH)\n\n return (notFoundMSB, notFoundLUABND, notFoundFFXBND, notFoundEmevd, exeStatus, gameParamExists)",
"def fix_missing_locations(node):\r\n def _fix(node, lineno, col_offset):\r\n attrs = getattr(node, '_attributes', ()) or ()\r\n if 'lineno' in attrs:\r\n if not hasattr(node, 'lineno'):\r\n node.lineno = lineno\r\n else:\r\n lineno = node.lineno\r\n if 'col_offset' in attrs:\r\n if not hasattr(node, 'col_offset'):\r\n node.col_offset = col_offset\r\n else:\r\n col_offset = node.col_offset\r\n for child in iter_child_nodes(node):\r\n _fix(child, lineno, col_offset)\r\n _fix(node, 1, 0)\r\n return node",
"def filled_positions(self):\n return [x for x in assignable_positions if self.grid[x][0]]",
"def compare_with(self, bugs):\n self.__true_positives = []\n self.__false_positives = []\n self.__false_negatives = []\n for bug in bugs:\n if Reference.__has_similar_rect(bug.bounds(), self.__bugs):\n self.__true_positives.append(bug.bounds())\n else:\n self.__false_positives.append(bug.bounds())\n\n for bug in self.__bugs:\n if not Reference.__has_similar_rect(bug, self.__true_positives):\n self.__false_negatives.append(bug)",
"def __parse_position_data(self):\n self.add_debug('Parse position data ...')\n\n for i in range(len(self._lines)):\n if self.has_errors(): break\n line = self._lines[i]\n if len(line) < 1: continue\n if self.TIMESTAMP_MARKER in line: continue\n if self.RACK_BARCODE_MARKER in line: continue\n\n msg = 'Unexpected content in line %i: %s' % (i + 1, line)\n if not self.SEPARATOR in line: self.add_error(msg)\n tokens = line.split(self.SEPARATOR)\n if not len(tokens) == 2: self.add_error(msg)\n if self.has_errors(): continue\n\n pos_label = tokens[0].strip()\n if self.position_map.has_key(pos_label):\n msg = 'Duplicate position label \"%s\"' % (pos_label)\n self.add_error(msg)\n if self.has_errors(): continue\n\n tube_barcode = tokens[1].strip()\n if tube_barcode == self.NO_TUBE_PLACEHOLDER: tube_barcode = None\n self.position_map[pos_label] = tube_barcode",
"def load_data(self):\n @Logger.runtime\n def process_coords():\n \"\"\"\n The placement of locations on our minimap is crucial. Panda3D objects however have a coordinate range from\n -1 to 1 on all axis, meaning that if we read a coordinate of a location from some image processing software\n by hand, we have to transform those coordinates into coordinates Panda would understand. This function does\n just that.\n :return: Normalized coordinates of location coordinates.\n \"\"\"\n split_coords = row[\"map_coord\"].split(',')\n map_x, map_y = [int(i) for i in split_coords]\n map_x_normed = ((map_x*2) / self.MINIMAP_DIM) - 1\n map_y_normed = -(((map_y*2) / self.MINIMAP_DIM) - 1)\n return map_x_normed, map_y_normed\n\n @Logger.runtime\n def process_texture():\n texture_path = Path(\"resource/textures/{}\".format(row[\"texture\"]))\n texture = self.loader.loadTexture(texture_path)\n return texture\n\n # the cylinder is loaded here but it does not yet show up, until it's specifically asked to\n self.scene_3d_model = self.loader.loadModel(self.PATHS[\"3D_SCENE_MODEL\"])\n\n try:\n with open(self.PATHS[\"LOCATIONS_DB\"], \"r\") as l_file:\n data = csv.DictReader(l_file, delimiter=\"|\")\n for row in data:\n id = int(row[\"id\"])\n x, y = process_coords()\n neighbors = [int(neighbor_id) for neighbor_id in row[\"neighbors\"].split(',')]\n texture = process_texture()\n location = Location(id, x, y, neighbors, texture)\n location.reparentTo(self.render2d)\n self.locations.append(location)\n Logger.log_info('The locations_db has been loaded')\n except:\n Logger.error('{} file not found!'.format(self.PATHS[\"LOCATIONS_DB\"]))\n\n self.active_location = self.locations[0]",
"def findfeatures(self):\n self.set_wdiff()\n\n #xp, wp=st.findfeatures(self.xarr, self.farr, self.slines, self.sfluxes,\n # self.ws, mdiff=self.mdiff, wdiff=self.wdiff, sigma=self.sigma, niter=self.niter, sections=3)\n xp,wp=st.crosslinematch(self.xarr, self.farr, self.slines, self.sfluxes,\n self.ws, mdiff=self.mdiff, wdiff=20, sigma=self.sigma, niter=self.niter)\n for x, w in zip(xp, wp):\n if w not in self.wp and w>-1: \n self.xp.append(x)\n self.wp.append(w)\n self.plotFeatures()\n self.redraw_canvas()",
"def load_location_codes(force_reload):\n logger.info('Loading location data')\n load_location_data(force_reload)",
"def load_poems(self):\n file = open(self.name, \"r\")\n content = file.readlines()\n for i in content:\n self.add_msg_and_index(i.strip())"
] | [
"0.56719327",
"0.55538607",
"0.5547954",
"0.5500129",
"0.54332054",
"0.5379162",
"0.5337847",
"0.53154814",
"0.52816904",
"0.52486926",
"0.52087176",
"0.5208394",
"0.5205178",
"0.5172711",
"0.51594424",
"0.5153587",
"0.5099652",
"0.5093775",
"0.5068396",
"0.5058991",
"0.50521183",
"0.50520426",
"0.5029679",
"0.50115305",
"0.50094557",
"0.50054514",
"0.49971655",
"0.49732876",
"0.4972126",
"0.49670708"
] | 0.6171006 | 0 |
Returns true if the list contains a similar rect. | def __has_similar_rect(rect, rect_list):
for i in reversed(range(len(rect_list))):
if Reference.__is_similar(rect_list[i], rect):
del(rect_list[i])
return True
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __is_similar(rect, another):\n area1 = rect[2]*rect[3]\n area2 = another[2]*another[3]\n intersect_width = min(rect[0]+rect[2], another[0]+another[2]) - max(rect[0],another[0])\n if not intersect_width > 0:\n return False\n intersect_height = min(rect[1]+rect[3], another[1]+another[3]) - max(rect[1],another[1])\n if not intersect_height > 0:\n return False\n intersect_area = intersect_width * intersect_height\n return (float(intersect_area) / float(min(area1,area2))) > 0.7",
"def _rect_intersects(self, rect):\n\tb = (self.left() > rect.right() or \n\t\tself.right() < rect.left() or \n\t\tself.top() < rect.bottom() or \n\t\tself.bottom() > rect.top())\n\treturn not b",
"def __contains__(self, item: 'BoundingBox2D') -> bool:\n top_left_inside = item.xmin >= self.xmin and item.ymin >= self.ymin\n bottom_right_inside = item.xmax <= self.xmax and item.ymax <= self.ymax\n return top_left_inside and bottom_right_inside",
"def intersects(self, rect):\n\t\treturn ( rect.right >= self.left and rect.left < self.right\n\t\t\tand rect.bottom >= self.top and rect.top < self.bottom )",
"def rectIsSimilar(rect1, rect2, similarity):\n x1, y1, w1, h1 = rect1\n x2, y2, w2, h2 = rect2\n\n if rectContains(rect1, rect2): return True\n if rectContains(rect2, rect1): return True\n\n wratio = float(w1) / float(w2)\n if wratio > 1: wratio = 1 / wratio\n if wratio < similarity: return False\n\n hratio = float(h1) / float(h2)\n if hratio > 1: hratio = 1 / hratio\n if hratio < similarity: return False\n\n wavg, havg = (w1 + w2) / 2.0, (h1 + h2) / 2.0\n\n wratio = abs(x1 - x2) / wavg\n\n if wratio > 1 - similarity: return False\n\n hratio = abs(y1 - y2) / havg\n if hratio > 1 - similarity: return False\n\n return True",
"def rectangle_already_tracked(rectangles, rectangle):\n for current_rectangle in rectangles:\n if rectangle_percentage_coincidence(current_rectangle, rectangle) > 0.6:\n return True \n return False",
"def __contains__(self, item):\n if len(item) != len(self.sizes):\n raise ValueError('Point dimension does not match grid dimension')\n for i in range(len(self.sizes)):\n if not 1 <= item[i] < self.sizes[i] - 1:\n return False\n return True",
"def hasMatch(self):\n for cell in (self.head, self.tail):\n for neighbour in cell.find_neighbours():\n if neighbour.pips == cell.pips:\n return True\n return False",
"def __contains__(self, item):\n try:\n pos = Vec2(*item)\n return pos.x >= self.origin.x and pos.y >= self.origin.y \\\n and pos.x < self.origin.x + self.size.x \\\n and pos.y < self.origin.y + self.size.y\n except TypeError:\n return False",
"def check_win(self):\n for pos in self.win_set:\n s = set([self.grid[p] for p in pos])\n if len(s) == 1 and (0 not in s):\n return True\n return False",
"def is_square (self):\n return self.width == self.height",
"def is_win(self, color):\n win = self.n\n # check y-strips\n for y in range(self.n):\n count = 0\n for x in range(self.n):\n if self[x][y] == color:\n count += 1\n if count == win:\n return True\n # check x-strips\n for x in range(self.n):\n count = 0\n for y in range(self.n):\n if self[x][y] == color:\n count += 1\n if count == win:\n return True\n # check two diagonal strips\n count = 0\n for d in range(self.n):\n if self[d][d] == color:\n count += 1\n if count == win:\n return True\n count = 0\n for d in range(self.n):\n if self[d][self.n - d - 1] == color:\n count += 1\n if count == win:\n return True\n\n return False",
"def contains(self, coord):\n # print(coord, self.position, self.size)\n return (0 <= coord[0] - self.position[0] < self.size[0] and\n 0 <= coord[1] - self.position[1] < self.size[1])",
"def wid_in(self, wid):\n \n for row in self.tiles:\n if wid in row:\n return True\n return False",
"def maybe_rectal(self):\n return bool(set(self.locations) & set(StandardTerminology.RECTAL_LOCATIONS))",
"def almostEqualList(self, l1:List[float], l2:List[float], margin:float):\r\n ret = False\r\n for i in range(0,len(l1)):\r\n diff = abs(l1[i] - l2[i])\r\n if diff < margin:\r\n ret = True\r\n else:\r\n return False\r\n return ret",
"def __check_if_symbol_is_over(rect1, rect2):\n\n rect_center_x_coord = rect1[4][0]\n rect2_center_x_coord = rect2[4][0]\n rect2_width = rect2[5]\n rect1_center_y_coord = rect1[4][1]\n rect2_center_y_coord = rect2[4][1]\n\n leftmost_x_coord = rect2_center_x_coord - (rect2_width // 2)\n rightmost_y_coord = rect2_center_x_coord + (rect2_width // 2)\n if (\n leftmost_x_coord <= rect_center_x_coord <= rightmost_y_coord\n and\n rect1_center_y_coord < rect2_center_y_coord\n ):\n return True\n else:\n return False",
"def check_place(self, positions):\n return self.size == len(set(positions[i] + i for i in range(self.size))) == len(\n set(positions[i] - i for i in range(self.size)))",
"def issquare(self):\r\n if self.width == self.height:\r\n return True\r\n else:\r\n return False",
"def inside_itself(self):\n for i in range(2, len(self.nodes)):\n if self.nodes[0] == self.nodes[i]:\n return True\n return False",
"def is_overlapping(self, region):\n if self.x2 < region.x1:\n return False # this box is left the other\n if self.x1 > region.x2:\n return False # this box is right the other\n if self.y2 < region.y1:\n return False # this box is above the other\n if self.y1 > region.y2:\n return False # this box is below the other\n return True",
"def rectContains(rect1, rect2):\n x1, y1, w1, h1 = rect1\n x2, y2, w2, h2 = rect2\n\n if x2 >= x1 and y2 >= y1 and x2 <= x1 + w1 and y2 <= y1 + h1 and x2 + w2 <= x1 + w1 and y2 + h2 <= y1 + h1:\n return True\n return False",
"def isSquare(self):\n return self._width == self._height",
"def check_overlap(current, hit, overlap = 200):\n for prev in current:\n p_coords = prev[2:4]\n coords = hit[2:4]\n if get_overlap(coords, p_coords) >= overlap:\n return True\n return False",
"def collision_pipes(self, pipes_list: list):\n result = False\n for pipe in pipes_list:\n if self.x_pos + self.width > pipe.x_pos and self.x_pos < pipe.x_pos + pipe.width:\n if self.y_pos < pipe.y_pos_up + pipe.height: # collide with top\n result = True\n break\n elif self.y_pos + self.height > pipe.y_pos_down: # collide with bottom\n result = True\n break\n return result",
"def collided_with(self, entity):\n drawables = self.get_drawables()\n rectangles = []\n for d in drawables:\n rectangles.append(d.get_rect())\n return entity.get_rect().collidelist(rectangles) != -1",
"def collided_with(self, entity):\n drawables = self.get_drawables()\n rectangles = []\n for d in drawables:\n rectangles.append(d.get_rect())\n return entity.get_rect().collidelist(rectangles) != -1",
"def is_redundant(t, t_objects):\n\n x,y,w,h = t.bounding_box\n\n for tracker in t_objects:\n if t.face_id == tracker.face_id:\n continue\n x_t, y_t, w_t, h_t = tracker.bounding_box\n result = in_rect(np.array([[x,y],[x+w,y], [x,y+h], [x+w,y+h]]),\n (x_t, y_t), (x_t+w_t, y_t+h_t))\n\n if sum(result) > 1:\n return True\n return False",
"def inside(self, l1, l2):\n x, y = self.coords2pixel(l1, l2)\n\n return x >= 0 and x < self.XSize() and y >= 0 and y < self.YSize()",
"def check(self):\n for row in self.grid:\n for i in range(1, 10):\n if row.count(i) != 1:\n return False\n\n for col in range(9):\n lst = [row[col] for row in self.grid]\n for i in range(1, 10):\n if lst.count(i) != 1:\n return False\n \n for i in range(3):\n for j in range(3):\n lst = [row[j* 3:(j*3) + 3] for row in self.grid[i * 3:(i*3) + 3]] \n flat_list = []\n for k in lst:\n for number in k:\n flat_list.append(number)\n \n for check_number in range(1, 10):\n if flat_list.count(check_number) != 1:\n return False\n return True"
] | [
"0.6806085",
"0.65448457",
"0.6458573",
"0.6406005",
"0.6380805",
"0.6235643",
"0.6211002",
"0.6206766",
"0.615934",
"0.6149881",
"0.61256045",
"0.6114601",
"0.6105064",
"0.60557085",
"0.60360897",
"0.60351527",
"0.60016364",
"0.5986043",
"0.59527403",
"0.5941617",
"0.59376997",
"0.59040636",
"0.5895333",
"0.5891638",
"0.5851191",
"0.5848853",
"0.5848853",
"0.58358145",
"0.583148",
"0.5820121"
] | 0.8229332 | 0 |
Returns true if the rects are of similar size and position. | def __is_similar(rect, another):
area1 = rect[2]*rect[3]
area2 = another[2]*another[3]
intersect_width = min(rect[0]+rect[2], another[0]+another[2]) - max(rect[0],another[0])
if not intersect_width > 0:
return False
intersect_height = min(rect[1]+rect[3], another[1]+another[3]) - max(rect[1],another[1])
if not intersect_height > 0:
return False
intersect_area = intersect_width * intersect_height
return (float(intersect_area) / float(min(area1,area2))) > 0.7 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def issquare(self):\r\n if self.width == self.height:\r\n return True\r\n else:\r\n return False",
"def is_square (self):\n return self.width == self.height",
"def rectIsSimilar(rect1, rect2, similarity):\n x1, y1, w1, h1 = rect1\n x2, y2, w2, h2 = rect2\n\n if rectContains(rect1, rect2): return True\n if rectContains(rect2, rect1): return True\n\n wratio = float(w1) / float(w2)\n if wratio > 1: wratio = 1 / wratio\n if wratio < similarity: return False\n\n hratio = float(h1) / float(h2)\n if hratio > 1: hratio = 1 / hratio\n if hratio < similarity: return False\n\n wavg, havg = (w1 + w2) / 2.0, (h1 + h2) / 2.0\n\n wratio = abs(x1 - x2) / wavg\n\n if wratio > 1 - similarity: return False\n\n hratio = abs(y1 - y2) / havg\n if hratio > 1 - similarity: return False\n\n return True",
"def __has_similar_rect(rect, rect_list):\n for i in reversed(range(len(rect_list))):\n if Reference.__is_similar(rect_list[i], rect):\n del(rect_list[i])\n return True\n return False",
"def isSquare(self):\n return self._width == self._height",
"def is_equal(self, other):\n return self.close(other) & (abs(self.width - other.width) < 100) & (abs(self.height - other.height) < 50)",
"def __check_if_symbol_is_over(rect1, rect2):\n\n rect_center_x_coord = rect1[4][0]\n rect2_center_x_coord = rect2[4][0]\n rect2_width = rect2[5]\n rect1_center_y_coord = rect1[4][1]\n rect2_center_y_coord = rect2[4][1]\n\n leftmost_x_coord = rect2_center_x_coord - (rect2_width // 2)\n rightmost_y_coord = rect2_center_x_coord + (rect2_width // 2)\n if (\n leftmost_x_coord <= rect_center_x_coord <= rightmost_y_coord\n and\n rect1_center_y_coord < rect2_center_y_coord\n ):\n return True\n else:\n return False",
"def _rect_intersects(self, rect):\n\tb = (self.left() > rect.right() or \n\t\tself.right() < rect.left() or \n\t\tself.top() < rect.bottom() or \n\t\tself.bottom() > rect.top())\n\treturn not b",
"def is_square(self):\n lines, columns = self.get_size()\n return lines == columns",
"def compare(self, other_group):\n x_bounds = self.bounding_box_x_len == other_group.bounding_box_x_len\n y_bounds = self.bounding_box_y_len == other_group.bounding_box_y_len\n same_num_cells = self.num_colored_cells == other_group.num_colored_cells\n if not (x_bounds and y_bounds and same_num_cells):\n return False\n for row_ind in range(len(other_group.cells)):\n for col_ind in range(len(other_group.cells[0])):\n if other_group.cells[row_ind][col_ind] != self.cells[row_ind][col_ind]:\n return False\n return True",
"def inside_rectangle(self, x, y):\n if (self.pos.x - self.width < x < self.pos.x + self.width and\n self.pos.y - self.height < y < self.pos.y + self.height):\n return True",
"def intersects(self, rect):\n\t\treturn ( rect.right >= self.left and rect.left < self.right\n\t\t\tand rect.bottom >= self.top and rect.top < self.bottom )",
"def check_place(self, positions):\n return self.size == len(set(positions[i] + i for i in range(self.size))) == len(\n set(positions[i] - i for i in range(self.size)))",
"def __contains__(self, item: 'BoundingBox2D') -> bool:\n top_left_inside = item.xmin >= self.xmin and item.ymin >= self.ymin\n bottom_right_inside = item.xmax <= self.xmax and item.ymax <= self.ymax\n return top_left_inside and bottom_right_inside",
"def _shape_compare(shape1, shape2):\n if len(shape1) != len(shape2):\n return False\n for s1, s2 in zip(shape1, shape2):\n if s1 != s2:\n return False\n return True",
"def is_square(self):\n return self.shape[0] == self.shape[1]",
"def is_square(self):\n return self.shape[0] == self.shape[1]",
"def is_valid_size(self, dot_width, dot_height, distance, screen_width, screen_height):\n if dot_width * distance > screen_width or dot_height * distance > screen_height:\n return False\n return True",
"def fits_into(self, other) -> bool:\n return other.width > self.width and other.height > self.height",
"def rectContains(rect1, rect2):\n x1, y1, w1, h1 = rect1\n x2, y2, w2, h2 = rect2\n\n if x2 >= x1 and y2 >= y1 and x2 <= x1 + w1 and y2 <= y1 + h1 and x2 + w2 <= x1 + w1 and y2 + h2 <= y1 + h1:\n return True\n return False",
"def identical_grid(self, other) -> bool:\n return (\n (\n self.crs is None\n or other.raster.crs is None\n or self.crs == other.raster.crs\n )\n and np.allclose(self.transform, other.raster.transform, atol=1e-06)\n and np.allclose(self.shape, other.raster.shape)\n )",
"def is_overlapping(self, region):\n if self.x2 < region.x1:\n return False # this box is left the other\n if self.x1 > region.x2:\n return False # this box is right the other\n if self.y2 < region.y1:\n return False # this box is above the other\n if self.y1 > region.y2:\n return False # this box is below the other\n return True",
"def is_valid(box, img):\n valid_width = box['top_left_x'] > 0 and box['bottom_right_x'] < img.shape[1]\n valid_height = box['top_left_y'] > 0 and box['bottom_right_y'] < img.shape[0]\n return valid_width and valid_height",
"def is_rectal(self):\n return bool(self.locations and set(self.locations) <= set(StandardTerminology.RECTAL_LOCATIONS)) \\\n or bool(self.depth and 4 <= self.depth <= 16)",
"def check_position_for_same_occupancy(self, position1, position2):\n return self.board.board[position1] == self.board.board[position2]",
"def __eq__(self, other):\n return np.all(self.grid == other.grid) and np.all(self.pos == other.pos)",
"def _acceptable_dimensions(self, box):\n return self._min_width < box.x1-box.x0 < self._max_width and\\\n self._min_height < box.y1-box.y0 < self._max_height",
"def box_collision(self):\n border_box_pos_1 = self.box_1.x + self.box_1.width/2\n border_box_pos_2 = self.box_2.x - self.box_2.width/2\n\n if (border_box_pos_2 - border_box_pos_1) <= 0:\n return True\n else:\n return False",
"def similar(self, other):\r\n if self.rows == other.rows and self.columns == other.columns:\r\n return True\r\n else:\r\n return False",
"def check_position_for_same_colour(self, position1, position2):\n return (not self.check_position_free(position1)) and self.check_position_for_same_occupancy(position1, position2)"
] | [
"0.69476604",
"0.68666345",
"0.68315774",
"0.67185247",
"0.6648864",
"0.65498066",
"0.65251964",
"0.6512791",
"0.6401279",
"0.6394241",
"0.63913405",
"0.6323339",
"0.6255712",
"0.62301314",
"0.6203733",
"0.6200238",
"0.6200238",
"0.6144445",
"0.6120417",
"0.6112837",
"0.61006904",
"0.6097704",
"0.6095398",
"0.60875547",
"0.60769254",
"0.606063",
"0.6056795",
"0.60337394",
"0.6031573",
"0.6008865"
] | 0.75187314 | 0 |
Builds the obj on signal, or returns the signal if obj is None. | def build_or_passthrough(model, obj, signal):
return signal if obj is None else model.build(obj, signal) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __get__(self, obj, cls):\n if obj is None:\n msg = 'Signals can only be accessed through an instance'\n raise AttributeError(msg)\n instances = self._instances\n if obj in instances:\n res = instances[obj]\n else:\n res = instances[obj] = _Signal()\n return res",
"def build(self, obj):\n if isinstance(obj, self.art_type):\n return obj\n elif isinstance(obj, (tuple, list, dict, set)):\n if obj.__class__ is tuple:\n return self.build_tuple(obj)\n elif obj.__class__ is dict:\n return self.build_dict(obj)\n elif obj.__class__ is list:\n return self.build_list(obj)\n else:\n return self.build_set(obj)\n elif isinstance(obj, SageObject):\n return self.build_from_magic_method(obj)\n else:\n return self.build_from_string(obj)",
"def get_event_obj(self, obj: T) -> Event | None:\n\n if self.ufp_event_obj is not None:\n return cast(Event, getattr(obj, self.ufp_event_obj, None))\n return None",
"def _finishConstruction(self, obj):\n return obj",
"def _finishConstruction(self, obj):\n return obj",
"def default(self, obj): # pylint: disable=method-hidden\n if isinstance(obj, (ArmCalcInput, ArmCalcOutput)):\n return obj.__dict__\n elif isinstance(obj, (datetime, date)):\n return to_wcf_date(obj)\n return super(ArmCalcJsonEncoder, self).default(obj)",
"def getPlugged(self):\n return SignalBase(obj=signal_base_getPlugged(self.obj))",
"def obj_factory (d):\n try:\n kind = d['kind']\n o = self.vtable.get (kind, Obj) (d)\n except KeyError:\n o = obj (d)\n return o",
"def serialize(self, obj):\n return obj",
"def obj(self) -> object:\n pass",
"def build_from_magic_method(self, obj):\n magic_method = getattr(obj, self.magic_method_name)\n return magic_method()",
"def to_base(self, obj):\n if hasattr(obj, \"to_base\"):\n return obj.to_base()\n return obj",
"def _builder(o_name,_nodes,_tagged_reals):\n obj = _tagged_reals[o_name]\n \n if isinstance(obj,ElementaryReal):\n un = UncertainReal._archived_elementary(\n uid = obj.uid,\n x = obj.x\n )\n _tagged_reals[o_name] = un \n \n elif isinstance(obj,IntermediateReal): \n \n _node = _nodes[obj.uid] \n \n un = UncertainReal(\n obj.value,\n _vector_index_to_node( obj.u_components ),\n _vector_index_to_node( obj.d_components ),\n _ivector_index_to_node( obj.i_components, _nodes ),\n _node,\n )\n \n _tagged_reals[o_name] = un\n\n else:\n assert False, \"unexpected: {!r}\".format(obj)\n\n return un",
"def _seek_signal(self, fullname, nameparts, ob):\n # Done traversing name: add to list or fail\n if ob is undefined or len(nameparts) == 0:\n if ob is undefined:\n return 'Signal %r does not exist.' % fullname\n if not hasattr(ob, '_IS_SIGNAL'):\n return 'Object %r is not a signal.' % fullname\n self._upstream.append(ob)\n return None # ok\n # Get value if ob is a signal\n if hasattr(ob, '_IS_SIGNAL'):\n self._upstream_reconnect.append(ob)\n try:\n ob = ob()\n except SignalValueError:\n return 'Signal %r does not have all parts ready' % fullname # we'll rebind when that signal gets a value\n # Resolve name\n name, nameparts = nameparts[0], nameparts[1:]\n if name == '*' and isinstance(ob, (tuple, list)):\n for sub_ob in ob:\n msg = self._seek_signal(fullname, nameparts, sub_ob)\n if msg:\n return msg\n return None # ok\n return self._seek_signal(fullname, nameparts, getattr(ob, name, undefined))",
"def object_hook(self, obj: Any) -> Any:\n if '__type__' in obj:\n if obj['__type__'] == 'complex':\n val = obj['__value__']\n return val[0] + 1j * val[1]\n if obj['__type__'] == 'array':\n return np.array(obj['__value__'])\n if obj['__type__'] == 'result':\n return Result.from_dict(obj['__value__'])\n if obj['__type__'] == 'to_json':\n return obj['__value__']\n if obj['__type__'] == 'dill':\n decoded = base64.standard_b64decode(obj['__value__'])\n return dill.loads(decoded)\n return obj",
"def create_event_for_not_specific_model_and_signal(sender_instance, alarm_instance):\n context = {\n 'var': sender_instance\n }\n # key 'var' take the object value, for this reason in alarm template,\n # user can access to any field of it. Remember that template execution should return True or False\n\n condition = False\n try:\n template = Template(alarm_instance.formula)\n condition = eval(template.render(Context(context)))\n except:\n print('Formula error: ' + alarm_instance.name)\n return\n\n if condition == True: # Create the new event\n event = AlarmEvent.objects.create(alarm=alarm_instance)\n event.content_type = [sender_instance]\n event.save()\n if condition == False: # Update finished date to event\n events = AlarmEvent.objects.filter(alarm=alarm_instance)\n for event in events:\n event.update(finished=timezone.now()) if list(event.content_type) == [sender_instance] else None",
"def __init__(self, instance, created, signal_type):\n\n self.instance = instance\n self.created = created\n self.signal_type = signal_type",
"def _serialize(\n self, value: typing.Any, attr: str | None, obj: typing.Any, **kwargs\n ):\n return value",
"def _get_native_object(self):\n if self.rqrmi_state_changed:\n self._update_state()\n self.native_object = rqrmilib.create_model(self.packed_rqrmi)\n return self.native_object",
"def obj(self):\n if not self._obj:\n self._get()\n return self._obj",
"def default(self, obj):\n if isinstance(obj, tuple(TYPES.values())):\n key = '__%s__' % obj.__class__.__name__\n return {key: obj.__dict__}\n return json.JSONEncoder.default(self, obj)",
"def pack(self, obj):\n # TODO: use a JSON encoder that handles more types?\n if obj is not None:\n return json.dumps(obj)",
"def transform_one(self, obj: Any):\n return obj",
"def do(self, obj):\n if isinstance(obj, str):\n return 'st__' + obj\n\n if type(obj) in literals:\n return obj\n\n # Now check for list, set, and tuple, and skip if they don't contain\n # any non-literals\n if type(obj) in builtin_iterables:\n if all(isinstance(x, literals) for x in flattened(obj)):\n return as_nested_lists(obj)\n\n oid = id(obj)\n if oid in self._working:\n raise GlueSerializeError(\"Circular reference detected\")\n self._working.add(oid)\n\n fun, version = self._dispatch(obj)\n logging.debug(\"Serializing %s with %s\", obj, fun)\n result = fun(obj, self)\n\n if isinstance(obj, types.FunctionType):\n result['_type'] = 'types.FunctionType'\n elif isinstance(obj, types.MethodType):\n result['_type'] = 'types.MethodType'\n else:\n result['_type'] = \"%s.%s\" % (type(obj).__module__,\n type(obj).__name__)\n if version > 1:\n result['_protocol'] = version\n\n self._working.remove(oid)\n return result",
"def get_obj(cls, tsb):\n if tsb in Instrument_CP.instrument_symbol_to_obj_map:\n return Instrument_CP.instrument_symbol_to_obj_map[tsb]\n else: \n return cls(tsb)",
"def From(obj):\n return ensure_future(obj)",
"def __self__(self):\n if self._ob is not None:\n return self._ob()",
"def default(self, obj):\n \n if isinstance(obj, np.ndarray):\n return list(obj)\n\n if isinstance(obj, uuid.UUID):\n return str(obj)\n\n if isinstance(obj, datetime.datetime):\n return obj.isoformat()\n \n if isinstance(obj,TPC):\n return obj._so()\n \n # No special handling called for; pass through\n return json.JSONEncoder.default(self, obj)",
"def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n obj = kwargs.get(\"obj\")\n if obj:\n self.obj = obj",
"def __call__(self, obj):\n return getattr(self, 'handle_' + type(obj).__name__, self.__unknown)(obj)"
] | [
"0.6035896",
"0.59348387",
"0.55138874",
"0.53285486",
"0.53285486",
"0.5293422",
"0.51871073",
"0.51645017",
"0.5118689",
"0.5080994",
"0.50475734",
"0.501795",
"0.49606928",
"0.49454436",
"0.4915813",
"0.48942703",
"0.48796424",
"0.48708183",
"0.4843524",
"0.48375458",
"0.48369715",
"0.4825205",
"0.4804504",
"0.47936967",
"0.47891894",
"0.47885403",
"0.4787898",
"0.47768104",
"0.4757055",
"0.4754472"
] | 0.72735953 | 0 |
bind methods to the shell | def bind(self, shell):
shell.render_prompt = types.MethodType(self.render_prompt.__func__, shell) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shell(self, cmd):\n raise NotImplementedError",
"def shell():\n pass",
"def _command(self, *cmd, handler=None):",
"def shell(self, **options):\n pass",
"def get_shell(self, shell):",
"def commands():",
"def commands():\n pass",
"def commands():\n pass",
"def commands():\n pass",
"def commands():\n pass",
"def Shell(self, cmd): # pylint: disable=invalid-name\n raise NotImplementedError",
"def command():\n pass",
"def cmd(self):",
"def modulecmds():\n\n class Commands:\n @staticmethod\n def setenv(key, val=None):\n val = val or key\n return \"setenv({0!r}, {1!r})\\n\".format(key, val)\n\n @staticmethod\n def unsetenv(key):\n return \"unsetenv({0!r})\\n\".format(key)\n\n @staticmethod\n def load(x):\n return \"load({0!r})\\n\".format(x)\n\n @staticmethod\n def load_first(*x):\n x = \",\".join(\"{0!r}\".format(_) for _ in x)\n return \"load_first({0})\\n\".format(x)\n\n @staticmethod\n def unload(x):\n return \"unload({0!r})\\n\".format(x)\n\n @staticmethod\n def prepend_path(key, val=None, sep=os.pathsep):\n val = val or key\n return \"prepend_path({0!r},{1!r},sep={2!r})\\n\".format(key, val, sep)\n\n @staticmethod\n def append_path(key, val=None, sep=os.pathsep):\n val = val or key\n return \"append_path({0!r},{1!r},sep={2!r})\\n\".format(key, val, sep)\n\n @staticmethod\n def remove_path(key, val=None, sep=os.pathsep):\n val = val or key\n return \"remove_path({0!r},{1!r},sep={2!r})\\n\".format(key, val, sep)\n\n @staticmethod\n def set_alias(key, val):\n return \"set_alias({0!r},{1!r})\\n\".format(key, val)\n\n @staticmethod\n def unset_alias(key):\n return \"unset_alias({0!r})\\n\".format(key)\n\n @staticmethod\n def set_shell_function(key, val):\n return \"set_shell_function({0!r},{1!r})\\n\".format(key, val)\n\n @staticmethod\n def unset_shell_function(key):\n return \"unset_shell_function({0!r})\\n\".format(key)\n\n @staticmethod\n def use(path):\n return \"use({0!r})\\n\".format(path)\n\n @staticmethod\n def unuse(path):\n return \"unuse({0!r})\\n\".format(path)\n\n @staticmethod\n def swap(a, b):\n return \"swap({0!r}, {1!r})\\n\".format(a, b)\n\n @staticmethod\n def family(x):\n return \"family({0!r})\\n\".format(x)\n\n @staticmethod\n def conflict(x):\n return \"conflict({0!r})\\n\".format(x)\n\n @staticmethod\n def prereq(x):\n return \"prereq({0!r})\\n\".format(x)\n\n @staticmethod\n def prereq_any(*x):\n x = \",\".join(\"{0!r}\".format(_) for _ in x)\n return \"prereq_any({0})\\n\".format(x)\n\n @staticmethod\n def source(f):\n return \"source({0!r})\\n\".format(f)\n\n @staticmethod\n def help(x):\n return \"help({0!r})\\n\".format(x)\n\n @staticmethod\n def whatis(x):\n return \"whatis({0!r})\\n\".format(x)\n\n @staticmethod\n def isloaded(x):\n return \"is_loaded({0!r})\\n\".format(x)\n\n return Commands()",
"def shell(self, shell):\n\n self._shell = shell",
"def _setup_command(self):\r\n raise NotImplementedError",
"def ConsoleRun(self, command, sender):\n pass",
"def _cli():\n pass",
"def _bind(self):\n\n pass",
"def _channel_invoke_shell(self) -> None:\n self._shell = True\n self.channel.shell()",
"def cli(_):\n pass",
"def cli(_):\n pass",
"def cli(self, env):\n raise NotImplementedError",
"def command(self):\n raise NotImplementedError",
"def getCommands(self):",
"def __init__(self, command_handler_name):\n\n # Set the command handler attributes\n self.name = command_handler_name",
"def do_command(self, args):\n pass",
"def accept_command():\n # TODO",
"def work(self):\n\n cmd = self.options.command\n cmdargs = self.options.args\n\n # find function\n fname = \"cmd_\" + cmd.replace('-', '_')\n if not hasattr(self, fname):\n self.log.error('bad subcommand, see --help for usage')\n sys.exit(1)\n fn = getattr(self, fname)\n\n b = inspect.signature(fn).bind(*cmdargs)\n\n fn(*b.args, **b.kwargs)",
"def repl_command(fxn):\n\n @functools.wraps(fxn)\n def wrapper(self, arglist):\n \"\"\"Wraps the command method\"\"\"\n args = []\n kwargs = {}\n if arglist:\n for arg in shlex.split(arglist):\n if \"=\" in arg:\n split = arg.split(\"=\", 1)\n kwargs[split[0]] = split[1]\n else:\n args.append(arg)\n return fxn(self, *args, **kwargs)\n\n return wrapper"
] | [
"0.7021964",
"0.69798553",
"0.68439513",
"0.68174416",
"0.66994655",
"0.6641779",
"0.66141284",
"0.66141284",
"0.66141284",
"0.66141284",
"0.65619034",
"0.6286182",
"0.6272943",
"0.6262935",
"0.62542933",
"0.62212807",
"0.61768734",
"0.6073992",
"0.6073585",
"0.6052224",
"0.59946984",
"0.59946984",
"0.599143",
"0.5975043",
"0.5974777",
"0.59716064",
"0.5943889",
"0.5886257",
"0.58556485",
"0.58451086"
] | 0.7553567 | 0 |
Simply returns the original position, default = [0,0,0]. Inputs: pose [x,y,theta] in [m,m,degrees]. Returns: pose [x,y,theta] in [m,m,degrees]. | def get_goal_pose(self,pose=[0,0,0]):
return pose | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def initial_pose(self):\n return self._initial_pose",
"def get_pose(self):\n return self._model.get_pose()",
"def getPose(self):\n\t\treturn self.__subs['pose'].getData()",
"def get_current_position(self) -> np.ndarray:\n pose_world = forward_kinematics(self.body, eef_link=self.DoF - 1)\n pose_rcm = self.pose_world2rcm(pose_world, 'matrix')\n return pose_rcm",
"def pose_cb(self, msg):\n self.current_pose = msg.pose",
"def current_pose_estimate(self):\n \n try:\n stamp = self._tf_listener.getLatestCommonTime(self._base_frame, self._map_frame)\n curr_pose = PoseStamped(header=Header(stamp=stamp, frame_id=self._base_frame))\n curr_pose = self._tf_listener.transformPose(self._map_frame, curr_pose)\n angles = tr.euler_from_quaternion([\n curr_pose.pose.orientation.x,\n curr_pose.pose.orientation.y,\n curr_pose.pose.orientation.z,\n curr_pose.pose.orientation.w])\n return Particle(curr_pose.pose.position.x, curr_pose.pose.position.y, angles[2],1)\n except (tf2.ExtrapolationException, tf2.LookupException, tf2.TransformException) as e:\n print(\"Robot pose estimate not ready yet: \", e.message)\n return Particle(0,0,0,1)",
"def do_transform_pose(transform: Transform, pose: Pose) -> Pose:\n transform_mat = alloy.math.transformation_matrix_from_array(alloy.ros.transform_to_numpy(transform))\n pose_mat = alloy.math.transformation_matrix_from_array(alloy.ros.pose_to_numpy(pose))\n #combine two transformation matrix\n trans_pose_mat = np.matmul(transform_mat, pose_mat)\n\n return alloy.ros.numpy_to_pose(alloy.math.transformation_matrix_to_array(trans_pose_mat))",
"def _pose_from_odom(self, odom): \n pose = odom.pose.pose.position\n return [pose.x, pose.y, pose.z]",
"def transform(self, passed_stamped_pose):\n # Creating / Updating transform with latest translation and rotation.\n transform = TransformStamped()\n transform.header = rospy.get_rostime()\n transform.transform.translation = Point(self.translation[0],self.translation[1], 0.0)\n transform.transform.rotation = Quaternion(self.rotation[0],self.rotation[1],self.rotation[2],self.rotation[3])\n\n # pose = PoseStamped(passed_stamped_pose.header, passed_stamped_pose.pose)\n pose = tf2_geometry_msgs.do_transform_pose(passed_stamped_pose, transform)\n \n return pose",
"def current_pose():\n global current_pose\n while current_pose is None:\n pass\n return current_pose",
"def get_pose_of_model(self, robot_name):\n pose_now = self.gz_model_obj.get_model_pose(robot_name)\n \n return pose_now",
"def get_extrinsic_matrix(pose):\n batch_size, _ = pose.shape\n rot = pose[:,:3]\n trans = pose[:,3:]\n\n rot = transforms.euler_angles_to_matrix(rot,convention=\"XYZ\")\n pose = torch.cat((rot,trans.view(batch_size, 3, 1)), -1)\n\n return pose",
"def transform_pose(self, pose: Pose, target_frame: str) -> Union[Pose, None]:\n copy_pose = pose.copy()\n copy_pose.header.stamp = rospy.Time(0)\n if not self.canTransform(target_frame, pose.frame, rospy.Time(0)):\n rospy.logerr(\n f\"Can not transform pose: \\n {pose}\\n to frame: {target_frame}.\\n Maybe try calling 'update_transforms_for_object'\")\n return\n new_pose = super().transformPose(target_frame, copy_pose)\n\n copy_pose.pose = new_pose.pose\n copy_pose.header.frame_id = new_pose.header.frame_id\n copy_pose.header.stamp = rospy.Time.now()\n\n return Pose(*copy_pose.to_list(), frame=new_pose.header.frame_id)",
"def convert_pose_inverse_transform(pose):\n translation = np.zeros((4,1))\n translation[0] = -pose.position.x\n translation[1] = -pose.position.y\n translation[2] = -pose.position.z\n translation[3] = 1.0\n\n rotation = (pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w)\n euler_angle = tr.euler_from_quaternion(rotation)\n rotation = np.transpose(tr.rotation_matrix(euler_angle[2], [0,0,1])) # the angle is a yaw\n transformed_translation = rotation.dot(translation)\n\n translation = (transformed_translation[0], transformed_translation[1], transformed_translation[2])\n rotation = tr.quaternion_from_matrix(rotation)\n return (translation, rotation)",
"def rectify_pose(pose):\n pose = pose.copy()\n R_mod = cv2.Rodrigues(np.array([np.pi, 0, 0]))[0]\n R_root = cv2.Rodrigues(pose[:3])[0]\n new_root = R_root.dot(R_mod)\n pose[:3] = cv2.Rodrigues(new_root)[0].reshape(3)\n return pose",
"def update_pose(self, data):\n # self.pose = data\n self.x_position = round(data.pose.pose.position.x, 4)\n self.y_position = round(data.pose.pose.position.y, 4)\n [yaw, _, _] = quaternion_to_euler(\n data.pose.pose.orientation.x, \n data.pose.pose.orientation.y, \n data.pose.pose.orientation.z, \n data.pose.pose.orientation.w\n )\n \n self.theta = round(yaw, 4)\n print(f'(Reading) X: {data.pose.pose.position.x}\\t Y:{data.pose.pose.position.y}')\n # self.theta = round(data.pose.pose.orientation.z, 4)",
"def convert_pose_to_xy_and_theta(pose):\n orientation_tuple = (pose.orientation.x, pose.orientation.y, pose.orientation.z, pose.orientation.w)\n angles = euler_from_quaternion(orientation_tuple)\n return pose.position.x, pose.position.y, angles[2]",
"def reformat_pose_to_dict(self, now_pose):\n # now_pose è un dict in particolare { pose : [ {position : [{x : value , y:value , z:value} ] } , {orientation : [] } }\n # devo convertire i quaternioni in amgoli di eulero...estrarre i quaternioni da pose_now e convertirli in angoli RPY\n\n lato_corto_2 = 1.65 #1.45 # offset parcheggio\n \n #correggo gli offset x centrare le macchine nei parcheggi\n\n if abs(round(now_pose.position.x,2)) == 22.45:\n if now_pose.position.x < 0 :\n now_pose.position.x+=lato_corto_2\n now_pose.position.y-=0.4\n else :\n now_pose.position.x-=lato_corto_2\n now_pose.position.y+=0.4\n \n if abs(round(now_pose.position.y,2)) == 22.45:\n if now_pose.position.y < 0 :\n now_pose.position.y+=lato_corto_2\n now_pose.position.x+=0.4\n else :\n now_pose.position.y-=lato_corto_2\n now_pose.position.x-=0.4\n\n # correggo la z per renderla uguale all'asfalto che viene spownata nel mondo\n\n offset_asfalto = 0.3\n\n x = now_pose.position.x\n y = now_pose.position.y\n z = now_pose.position.z + offset_asfalto\n\n q1 = now_pose.orientation.x\n q2 = now_pose.orientation.y\n q3 = now_pose.orientation.z\n q4 = now_pose.orientation.w\n\n\n # converto i quaternioni in angoli di rulero RPY in radianti\n orientation_list = [q1,q2,q3,q4]\n\n euler = euler_from_quaternion( orientation_list )\n roll = euler[0]\n pitch = euler[1]\n yaw = round(euler[2],2) + np.pi\n\n\n # creo la lista dei parametri che mi servono nel campo pose:[] del file .yaml\n\n lista_parametri = [x ,y ,z ,roll ,pitch ,yaw ]\n\n # creo un dict con tutti i campi di cui ho bisogno nel file .yaml\n # settare le chiavi 'name' , ' type ' , 'package' , ' pose ' secondo le proprie necessità\n # i due stili sono equivalenti : usare quello preferito\n \"\"\"\n {\"name\" : \"park1\" , \n \"type\" : \"sdf\" , \n \"package\" : \"object_spawner\" , \n \"pose \":self.seq(lista_parametri) \n }\n \n \"\"\"\n lista_veicoli = ['macchina','pickup','ferrari','prius_hybrid','car_lexus','car_polo','car_volvo','car_golf']\n num_veicoli = 1\n\n #modificare qui implementando una funzione randomica se si vogliono piu veicoli casuali spawnati\n elemento_lista = {'name' : lista_veicoli[3],\n 'type': 'sdf',\n 'package': 'object_spawner',\n 'pose': self.seq( x , y , z , roll , pitch , yaw)}\n #\"\"\"\n # elemento_lista = {'name' : 'ferrari',\n # 'type': 'urdf',\n # 'package': 'autopark',\n # 'pose': self.seq( x , y , z , roll , pitch , yaw)}\n\n return elemento_lista",
"def pose_cb(self, msg):\n self.current_pose_g = msg\n self.enu_2_local()\n\n q0, q1, q2, q3 = (\n self.current_pose_g.pose.pose.orientation.w,\n self.current_pose_g.pose.pose.orientation.x,\n self.current_pose_g.pose.pose.orientation.y,\n self.current_pose_g.pose.pose.orientation.z,\n )\n\n psi = atan2((2 * (q0 * q3 + q1 * q2)),\n (1 - 2 * (pow(q2, 2) + pow(q3, 2))))\n\n self.current_heading_g = degrees(psi) - self.local_offset_g",
"def get_world_pose_for_camera_pose(self, pose):\n\n # Create a point stamped from the given position\n camera_point = geometry_msgs.msg.PointStamped()\n camera_point.header.stamp = rospy.Time.now()\n camera_point.header.frame_id = 'camera'\n camera_point.point.x = pose[0]\n camera_point.point.y = pose[1]\n camera_point.point.z = pose[2]\n\n # Wait for the transformation to be available\n time = rospy.Time().now()\n self.listener.waitForTransform('camera', 'world', time, rospy.Duration(5))\n world_point = self.listener.transformPoint('world', camera_point)\n\n # Return the new coordinates\n return [world_point.point.x, world_point.point.y, world_point.point.z]",
"def convert_pose_to_xy_and_theta(self, passed_stamped_pose):\n # Convert to map coordinate frame from odom\n pose = self.transform(passed_stamped_pose).pose # Apply current transform to given pose\n\n orientation_tuple = (pose.orientation.x,\n pose.orientation.y,\n pose.orientation.z,\n pose.orientation.w)\n angles = t.euler_from_quaternion(orientation_tuple)\n\n return (pose.position.x, pose.position.y, angles[2])",
"def _convert_pose(pose):\n i_vector = [pose.pose_ix, pose.pose_iy, pose.pose_iz]\n j_vector = [pose.pose_jx, pose.pose_jy, pose.pose_jz]\n k_vector = [pose.pose_kx, pose.pose_ky, pose.pose_kz]\n a, b, c = transforms.euler_rpw_by_vectors(i_vector, j_vector, k_vector)\n return [pose.pose_x, pose.pose_y, pose.pose_z, a, b, c]",
"def _get_init_pose(self):\n return self.init_pose_R, self.init_pose_t",
"def update_pose(self, data):\n\t\tself.pose = data\n\t\tself.pose.x = round(self.pose.x, 4)\n\t\tself.pose.y = round(self.pose.y, 4)",
"def getPose(self):\n return self.listener.pose",
"def make_pose(self, position, orientation, frame):\n\n pose = PoseStamped()\n pose.header.frame_id = frame\n pose.pose.position.x = position[0]\n pose.pose.position.y = position[1]\n pose.pose.position.z = position[2]\n pose.pose.orientation.w = orientation[0]\n pose.pose.orientation.x = orientation[1]\n pose.pose.orientation.y = orientation[2]\n pose.pose.orientation.z = orientation[3]\n return pose",
"def get_reconstruction_origin(r) -> np.ndarray:\n s = r.scale\n pose = pygeometry.Pose(np.array([r.rx, r.ry, r.rz]), np.array([r.tx / s, r.ty / s, r.tz / s]))\n return pose.get_origin()",
"def pose_cb(self, msg):\n rospy.loginfo(rospy.get_name() + ': pose received')\n self.current_pose = msg.pose",
"def as_pose(self):\n orientation_tuple = tf.transformations.quaternion_from_euler(0, 0, self.theta)\n return Pose(position=Point(x=self.x, y=self.y, z=0),\n orientation=Quaternion(x=orientation_tuple[0], y=orientation_tuple[1], z=orientation_tuple[2],\n w=orientation_tuple[3]))",
"def human_readable_pose2d(self, pose):\n\n\t\t# create a quaternion from the pose\n\t\tquaternion = (\n\t\tpose.orientation.x,\n\t\tpose.orientation.y,\n\t\tpose.orientation.z,\n\t\tpose.orientation.w\n\t\t)\n\n\t\t# convert quaternion rotation to euler rotation\n\t\troll, pitch, yaw = euler_from_quaternion(quaternion)\n\n\t\tresult = (\n\t\tpose.position.x, # x position\n\t\tpose.position.y, # y position\n\t\tyaw # theta angle\n\t\t)\n\n\t\treturn result"
] | [
"0.6909737",
"0.6768237",
"0.67472076",
"0.6660814",
"0.6647101",
"0.658427",
"0.6541245",
"0.6519557",
"0.6519502",
"0.6515536",
"0.6484813",
"0.6447546",
"0.64341164",
"0.6431541",
"0.63714004",
"0.6367724",
"0.63557184",
"0.6349324",
"0.6282474",
"0.6237291",
"0.6213221",
"0.62130594",
"0.61696666",
"0.61631256",
"0.6140847",
"0.6121499",
"0.61165565",
"0.6113092",
"0.611291",
"0.6109083"
] | 0.7092843 | 0 |
Set the range of physical values that should map onto the MLP | def set_physical_minmax(self, min, max):
# This allows you to set the min and the max of the quantity that you want the MLP to measure.
# Once you set this, you can pass in a physical number to get_mlp_value() and it will be mapped to an MLP value and returned
pass
# Maybe we should let the holder of the MLP determine these values and do the mapping? | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_mapping(self, value_min, value_min_raw, value_max, value_max_raw):\n assert value_min <= value_max\n # prevent division by zero.\n if value_min == value_max:\n value_max += 1.\n if value_min_raw == value_max_raw:\n value_max_raw += 1.\n self.value_min = value_min\n self.value_max = value_max\n self.value_min_raw = value_min_raw\n self.value_max_raw = value_max_raw\n self._value_scale = (self.value_max - self.value_min) / (self.value_max_raw - self.value_min_raw)",
"def setValues(self, values):\n if values is not None:\n self.scale_min, self.scale_max = values\n if self.scale_min is None:\n self.scale_min = self.start\n if self.scale_max is None:\n self.scale_max = self.end\n else:\n self.scale_min = self.start\n self.scale_max = self.end\n self.emitRange()\n self.updateDisplayValues()\n self.update()",
"def setRange(self, x_range, y_range):\n pass",
"def initializeDistribution(self):\n self.minVal = min(math.exp(self.upperBound),math.exp(self.lowerBound))\n self.maxVal = max(math.exp(self.upperBound),math.exp(self.lowerBound))",
"def SetBounds(self, p_float, p_float_1, p_float_2, p_float_3, p_float_4, p_float_5):\n ...",
"def SetBounds(self, p_float, p_float_1, p_float_2, p_float_3, p_float_4, p_float_5):\n ...",
"def set_visualization_range(self, start: int, end: int):\n self.__range = (start, end)",
"def set_slider_bounds(self,lower,upper,inclusive_bounds=None):\n self.bounds = (lower,upper)\n\n if inclusive_bounds is not None:\n self.inclusive_bounds = inclusive_bounds\n\n epsilon = max(self.slider['resolution'],0.00000000001)\n\n if self.inclusive_bounds[0] is False:\n lower+=epsilon\n if self.inclusive_bounds[1] is False:\n upper-=epsilon\n self.slider.config(from_=lower,to=upper)",
"def set_par_range(self, mins, maxs, frozen):\n self.parmins = mins\n self.parmaxs = maxs\n self.pars_frozen = frozen\n return",
"def setupPhysicalBounds(self):\n \n ### 2018-05-06 WIC - **do not** enforce +/- pi limits on the\n ### angles here.\n self.boundsPhysLo = np.array(\\\n [0.00, 0.00, 0., 0., -np.inf, -np.inf,-np.inf,0 ] )\n self.boundsPhysHi = np.array(\\\n [np.inf, np.inf, 1., np.inf, np.inf, np.inf,np.inf, np.inf ] )",
"def set_range(self, **rangekwargs):\n\n if 'xrange' in rangekwargs.keys(): \n xrange = rangekwargs['xrange']\n else: \n xrange = [-50.0, 50.0] # (default)\n\n if 'yrange' in rangekwargs.keys(): \n yrange = rangekwargs['yrange']\n else: \n yrange = [0.0, 1.25 * self.hist_max]\n\n self.sub.set_xlim(xrange) \n self.sub.set_ylim(yrange) \n\n self.sub.set_xlabel(r\"$\\mathtt{d_{LOS}}$ (Mpc/h)\", fontsize=20)\n\n return None",
"def SetGeoBoundaries(self,minLattitude,maxLattitude,minLongitude,maxLongitude):\n self.randomGenerator.minimumLattitude = minLattitude\n self.randomGenerator.maximumLattitude = maxLattitude\n self.randomGenerator.minimumLongitude = minLongitude\n self.randomGenerator.maximumLongitude = maxLongitude",
"def set_output_limits(self, min_value, max_value):\n self.out_min = min_value\n self.out_max = max_value\n if self.out_min > self.out_max:\n print(\"set_output_limits(): min must be smaller than max.\")\n self.iterm = self.clip_to_output_limits(self.iterm)\n self.output = self.clip_to_output_limits(self.output)",
"def setRange(self):\n # first determine ranges\n if len(self.activeWeapons) > 0:\n myPrimaryWeapon = self.activeWeapons[0]\n self.range = myPrimaryWeapon.myWeaponData.range * 1.0\n else:\n # no weapons left RUN\n self.mode = 'escape'\n self.range = 99999",
"def _update_data_range(self):\r\n self._h_min = np.min(self.h)\r\n self._h_max = np.max(self.h)\r\n self._hr_min = np.min(self.hr)\r\n self._hr_max = np.max(self.hr)\r\n self._m_min = np.nanmin(self.m)\r\n self._m_max = np.nanmax(self.m)\r\n\r\n if self.temperature is None or np.all(np.isnan(self.temperature)):\r\n self._T_min = np.nan\r\n self._T_max = np.nan\r\n else:\r\n self._T_min = np.nanmin(self.temperature)\r\n self._T_max = np.nanmax(self.temperature)\r\n\r\n return",
"def _setBound(self, value):\n if self._colormap is not None:\n if self._index == 0:\n min_ = value\n max_ = self._colormap.getVMax()\n else: # self._index == 1\n min_ = self._colormap.getVMin()\n max_ = value\n\n if max_ is not None and min_ is not None and min_ > max_:\n min_, max_ = max_, min_\n self._colormap.setVRange(min_, max_)",
"def set_min_max(self, xmin, xmax, ymin, ymax, zmin, zmax):\n\n self.xmin = xmin\n self.xmax = xmax\n self.ymin = ymin\n self.ymax = ymax\n self.zmin = zmin\n self.zmax = zmax",
"def getRangeMM(self) -> float:\n ...",
"def setMinMaxPoints(self, minPoint, maxPoint):\n self.minPoint = minPoint\n self.maxPoint = maxPoint\n self.calculate()",
"def setRange(self, range):\n\t\tself.range = range\n\t\tself.slider.setMinimum(0.0)\n\t\tself.slider.setMaximum(100.0)\n\t\tself.spinbox.setRange(self.range[0], self.range[1])\n\n\t\tdiff = self.range[1] - self.range[0]\n\t\tif diff <= 1:\n\t\t\tself.spinbox.setSingleStep(0.01)",
"def set_range(self, value):\n self.gauge.SetRange(value)",
"def set_range(self, new_range):\n self.range = new_range\n if new_range == 2:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x00')\n self.get_offset()\n elif new_range == 4:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x01')\n self.get_offset()\n elif new_range == 8:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x02')\n self.get_offset()\n elif new_range == 16:\n self.i2c.writeto_mem(accel_address, data_format, b'\\x03')\n self.get_offset()\n else:\n print(\"range can be 2, 4, 8, or 16\")",
"def _init_mass(self, mmin, mmax, nmbin):\n self.lgmass, self.dlgmass = np.linspace(log10(mmin), log10(mmax), \n num=nmbin, retstep=True)\n self.mass = np.power(10.0, self.lgmass) \n self.scale = np.power(self.r3conv*self.mass, 1.0/3.0) \n self.lgmmax = self.lgmass[-1]\n self.lgmmin = self.lgmass[0]",
"def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)",
"def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)",
"def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)",
"def __init__(self, min_val, max_val):\n self.values = (min_val, max_val)",
"def set_low_high_value(self):\n # do not apply scaler norm on not scalable data\n self.range_dict.clear()\n\n for data_name in self.dict_to_plot.keys():\n if self.quantitative_normalization:\n # Quantitative normalization\n data_arr, _ = self.img_model_adv.param_quant_analysis.apply_quantitative_normalization(\n data_in=self.dict_to_plot[data_name],\n scaler_dict=self.scaler_norm_dict,\n scaler_name_default=self.get_selected_scaler_name(),\n data_name=data_name,\n ref_name=self.quantitative_ref_eline,\n name_not_scalable=self.name_not_scalable,\n )\n else:\n # Normalize by the selected scaler in a regular way\n data_arr = normalize_data_by_scaler(\n data_in=self.dict_to_plot[data_name],\n scaler=self.scaler_data,\n data_name=data_name,\n name_not_scalable=self.name_not_scalable,\n )\n\n lowv, highv = np.min(data_arr), np.max(data_arr)\n # Create some 'artificially' small range in case the array is constant\n if lowv == highv:\n lowv -= 0.005\n highv += 0.005\n self.range_dict[data_name] = {\"low\": lowv, \"low_default\": lowv, \"high\": highv, \"high_default\": highv}",
"def range_to_m(self, data):\n return data * self._total_range + self._min_range_m",
"def __init__(__self__, *,\n max: pulumi.Input[int],\n min: pulumi.Input[int]):\n pulumi.set(__self__, \"max\", max)\n pulumi.set(__self__, \"min\", min)"
] | [
"0.693925",
"0.6680546",
"0.66189826",
"0.65850914",
"0.6456573",
"0.6456573",
"0.6417098",
"0.6364023",
"0.63490635",
"0.6339034",
"0.6289142",
"0.6263453",
"0.62625813",
"0.62568754",
"0.6235019",
"0.6200884",
"0.61912847",
"0.61842424",
"0.6182446",
"0.6177519",
"0.61768967",
"0.61756676",
"0.61233896",
"0.60998917",
"0.60998917",
"0.60998917",
"0.60998917",
"0.6091888",
"0.6075332",
"0.60526717"
] | 0.79315037 | 0 |
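The document in the row above is only a stub: its comments describe mapping a physical quantity onto the value range the MLP works with, and mention a `get_mlp_value()` accessor, but the body is `pass`. Below is a minimal sketch of that linear mapping; the `MLP` class name, the assumed [0, 1] MLP range, and the clamping behaviour are illustrative assumptions, not details from the record.

```python
class MLP:
    """Minimal stand-in that maps a physical quantity onto a normalized MLP range."""

    def __init__(self, mlp_min=0.0, mlp_max=1.0):
        # Range the MLP itself works with (assumed to be [0, 1] here).
        self.mlp_min = mlp_min
        self.mlp_max = mlp_max
        self.phys_min = None
        self.phys_max = None

    def set_physical_minmax(self, phys_min, phys_max):
        # Record the physical range (arguments renamed to avoid shadowing the min/max builtins).
        if phys_min >= phys_max:
            raise ValueError("phys_min must be smaller than phys_max")
        self.phys_min = phys_min
        self.phys_max = phys_max

    def get_mlp_value(self, physical_value):
        # Linearly interpolate the physical value into the MLP range, clamping at the ends.
        if self.phys_min is None or self.phys_max is None:
            raise RuntimeError("call set_physical_minmax() first")
        fraction = (physical_value - self.phys_min) / (self.phys_max - self.phys_min)
        fraction = max(0.0, min(1.0, fraction))
        return self.mlp_min + fraction * (self.mlp_max - self.mlp_min)


if __name__ == "__main__":
    mlp = MLP()
    mlp.set_physical_minmax(-10.0, 10.0)
    print(mlp.get_mlp_value(0.0))   # 0.5 (middle of the physical range)
    print(mlp.get_mlp_value(25.0))  # 1.0 (out-of-range values are clamped)
```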
Get the torque on the motor from the brakes | def get_motor_load_torque(self):
# Start with the brake normal
# change to 17deg (tan 17?)
# change to torque using the pitch of the thread on the ball screw
# (^ make sure to take friction into account)
# That should give us the torque acting on the motor. If this torque is greater than the motor max torque, it will slip
# Take into account that the max holding torque is different from the max torque. How do we know if the motor is holding or moving?
# How do we control the stepper motor? Where are the routines for that?
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_torque(self):\n return self.node.sdo[0x6077].phys # rate torque(mN.m) /1000",
"def mTorque(self):\n pass",
"def aTorque(self):\n pass",
"def motor_torques(self):\n raise NotImplementedError('Not yet implemented!')",
"def gTorque(self):\n pass",
"def joints_torque(self):\r\n return self._arm.joints_torque",
"def _compute_aero_torque(self):\n pass",
"def sTorque(self):\n pass",
"def get_motor_torques(\n self,\n motor_commands: np.ndarray,\n motor_control_mode=None) -> Tuple[np.ndarray, np.ndarray]:\n if not motor_control_mode:\n motor_control_mode = self._motor_control_mode\n\n motor_torques = None\n\n if motor_control_mode is robot_config.MotorControlMode.TORQUE:\n motor_torques = motor_commands\n\n if motor_control_mode is robot_config.MotorControlMode.POSITION:\n motor_torques = self._compute_pd_torques(\n desired_motor_angles=motor_commands,\n kp=self._kp,\n desired_motor_velocities=self._zero_array,\n kd=self._kd)\n \n if motor_torques is None:\n raise ValueError(\n \"{} is not a supported motor control mode\".format(motor_control_mode))\n\n # Apply the output filter to model actuator dynamics\n # BUG: Causes big instability in the sim\n # motor_torques = self._torque_filter(motor_torques)\n\n # Hard-code torque limits until the torque limit bug is fixed\n motor_torques = np.clip(motor_torques, -1.7, 1.7)\n\n # Apply motor damping and friction\n motor_torques -= (np.sign(self._previous_true_motor_velocity) *\n self._motor_torque_dependent_friction *\n motor_torques)\n motor_torques -= self._previous_true_motor_velocity * self._motor_damping\n\n # Rescale and clip the motor torques as needed.\n motor_torques = self._strength_ratios * motor_torques\n if (self._torque_lower_limits is not None or\n self._torque_upper_limits is not None):\n motor_torques = np.clip(motor_torques, self._torque_lower_limits,\n self._torque_upper_limits)\n\n return motor_torques, motor_torques",
"def getMotor(self):\n return self._l[3]",
"def comp_torque(self, output):\n\n N0 = output.elec.N0\n omega = 2 * pi * N0 / 60\n\n P = output.elec.Pem_av_ref\n losses = output.elec.Pj_losses # TODO update since there may also be other losses\n\n Tem_av_ref = (P - losses) / omega\n\n output.elec.Tem_av_ref = Tem_av_ref",
"def torque(system, /, use_demag=True):\n if use_demag:\n total_field = (mm.consts.mu0 *\n (oc.compute(system.energy.demag.effective_field, system)\n + system.energy.zeeman.H))\n else:\n total_field = mm.consts.mu0 * np.array(system.energy.zeeman.H)\n norm_field = df.Field(system.m.mesh, dim=1,\n value=(system.m.norm.array != 0))\n volume = df.integral(norm_field * df.dV, direction='xyz')\n moment = system.m * volume\n torque = (moment & total_field)\n return (df.integral(torque * df.dV / volume**2, direction='xyz'))",
"def _compute_solar_torque(self):\n pass",
"def setMotorTorque(self, torque):\r\n if torque < 0.0:\r\n torque = 0.0\r\n elif torque > 1.0:\r\n torque = 1.0\r\n torque *= self.maxTorque\r\n if self.reverse:\r\n torque *= -1\r\n dTorque = 2\r\n if self.torque < torque:\r\n self.torque += dTorque\r\n elif self.torque > torque:\r\n self.torque -= dTorque\r\n \r\n for tire in self.tires:\r\n if tire.torque:\r\n tire.shape.setMotorTorque( self.torque )",
"def make_torque(self):\n def torque_func(m):\n heff = self.field(m)\n total_torque = torque.landau_lifshitz(m, heff, self.damping)\n if self.stt != 0:\n total_torque += torque.slonczewski(m, self.Jc, self.stt)\n return total_torque\n self.torque = torque_func",
"def get_torque(self, theta, modulus):\n\n\t\treturn self.get_k(modulus)*theta",
"def _compute_gravity_torque(self):\n pass",
"def test_get_torquePerc(self):\n for app_num, servo_type in app_nr.items():\n try:\n par = self.get_parameter(servo_type, app_num, TORQUE_PERC_IDX, TORQUE_PERC_SUB)\n param_obj = self.__dict__[servo_type]._get_torquePerc()\n acs_par, completion = param_obj.get_sync()\n if(completion.code):\n print \"\\nError code found in torquePerc...\"\n continue\n self.data_match(acs_par, par)\n except NackEx:\n continue",
"def velocity_to_JointTorqueMsg(self):\n\t\t\tjointCmd = kinova_msgs.msg.JointTorque()\n\t\t\tjointCmd.joint1 = self.torque[0][0];\n\t\t\tjointCmd.joint2 = self.torque[1][1];\n\t\t\tjointCmd.joint3 = self.torque[2][2];\n\t\t\tjointCmd.joint4 = self.torque[3][3];\n\t\t\tjointCmd.joint5 = self.torque[4][4];\n\t\t\tjointCmd.joint6 = self.torque[5][5];\n\t\t\tjointCmd.joint7 = self.torque[6][6];\n\n\t\t\treturn jointCmd",
"def _compute_magnetic_torque(self, curr_date):\n if self._to_add[1]:\n gP = self.earth.transform(self.satPos_i, self.in_frame, curr_date)\n\n topoframe = TopocentricFrame(self.earth, gP, 'ENU')\n topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date)\n\n lat = gP.getLatitude()\n lon = gP.getLongitude()\n alt = gP.getAltitude() / 1e3 # Mag. Field needs degrees and [km]\n\n # get B-field in geodetic system (X:East, Y:North, Z:Nadir)\n B_geo = FileDataHandler.mag_field_model.calculateField(\n degrees(lat), degrees(lon), alt).getFieldVector()\n\n # convert geodetic frame to inertial and from [nT] to [T]\n B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo))\n\n B_b = self.inertial2Sat.applyTo(B_i)\n B_b = np.array([B_b.x, B_b.y, B_b.z])\n\n dipoleVector = self.dipoleM.getDipoleVectors(B_b)\n\n torque = np.sum(np.cross(dipoleVector, B_b), axis=0)\n\n self._mTorque = Vector3D(float(torque[0]), float(torque[1]), float(torque[2]))\n else:\n self._mTorque = Vector3D.ZERO",
"def _compute_magnetic_torque(self, curr_date):\n if self._to_add[1]:\n gP = self.earth.transform(self.satPos_i, self.in_frame, curr_date)\n\n topoframe = TopocentricFrame(self.earth, gP, 'ENU')\n topo2inertial = topoframe.getTransformTo(self.in_frame, curr_date)\n\n lat = gP.getLatitude()\n lon = gP.getLongitude()\n alt = gP.getAltitude() / 1e3 # Mag. Field needs degrees and [km]\n\n # get B-field in geodetic system (X:East, Y:North, Z:Nadir)\n B_geo = FileDataHandler.mag_field_model.calculateField(\n degrees(lat), degrees(lon), alt).getFieldVector()\n\n # convert geodetic frame to inertial and from [nT] to [T]\n B_i = topo2inertial.transformVector(Vector3D(1e-9, B_geo))\n\n B_b = self.inertial2Sat.applyTo(B_i)\n B_b = np.array([B_b.x, B_b.y, B_b.z])\n\n dipoleVector = self.dipoleM.getDipoleVectors(B_b)\n\n torque = np.sum(np.cross(dipoleVector, B_b), axis=0)\n\n self._mTorque = Vector3D(float(torque[0]), float(torque[1]), float(torque[2]))\n else:\n self._mTorque = Vector3D.ZERO",
"def get_motor_gains(self):\n return self._kp, self._kd",
"def set_hybrid_control(self, model, max_force_torque, timeout=5.0, stop_on_target_force=False):\n\n reduced_speed = np.deg2rad([100, 100, 100, 150, 150, 150])\n q_last = self.joint_angles()\n\n # Timeout for motion\n initime = rospy.get_time()\n xb = self.end_effector()\n failure_counter = 0\n\n while not rospy.is_shutdown() \\\n and (rospy.get_time() - initime) < timeout:\n\n # Transform wrench to the base_link frame\n Wb = self.get_ee_wrench()\n\n # Current Force in task-space\n Fb = -1 * Wb\n # Safety limits: max force\n if np.any(np.abs(Fb) > max_force_torque):\n rospy.logerr('Maximum force/torque exceeded {}'.format(np.round(Wb, 3)))\n self.set_target_pose_flex(pose=xb, t=model.dt)\n return FORCE_TORQUE_EXCEEDED\n\n if stop_on_target_force and np.any(np.abs(Fb)[model.target_force != 0] > model.target_force[model.target_force != 0]):\n rospy.loginfo('Target F/T reached {}'.format(np.round(Wb, 3)) + ' Stopping!')\n self.set_target_pose_flex(pose=xb, t=model.dt)\n return STOP_ON_TARGET_FORCE\n\n # Current position in task-space\n xb = self.end_effector()\n\n dxf = model.control_position_orientation(Fb, xb) # angular velocity\n\n # Limit linear/angular velocity\n dxf[:3] = np.clip(dxf[:3], -0.5, 0.5)\n dxf[3:] = np.clip(dxf[3:], -5., 5.)\n\n xc = transformations.pose_from_angular_velocity(xb, dxf, dt=model.dt)\n\n # Avoid extra acceleration when a point failed due to IK or other violation\n # So, this corrects the allowed time for the next point\n dt = model.dt * (failure_counter+1)\n\n q = self._solve_ik(xc)\n if q is None:\n rospy.logwarn(\"IK not found\")\n result = IK_NOT_FOUND\n else:\n q_speed = (q_last - q)/dt\n if np.any(np.abs(q_speed) > reduced_speed):\n rospy.logwarn(\"Exceeded reduced max speed %s deg/s, Ignoring command\" % np.round(np.rad2deg(q_speed), 0))\n result = SPEED_LIMIT_EXCEEDED\n else:\n result = self.set_joint_positions_flex(position=q, t=dt)\n\n if result != DONE:\n failure_counter += 1\n continue # Don't wait since there is not motion\n else:\n failure_counter = 0\n\n # Compensate the time allocated to the next command when there are failures\n for _ in range(failure_counter+1):\n self.rate.sleep()\n\n q_last = self.joint_angles()\n return DONE",
"def getMotorCommands():\n\n motor_commands = RoboCaller().call(\"getMotorCommands\", \"int\")\n for i in range(len(motor_commands)):\n motor_commands[i] = (motor_commands[i] + 2**15) % 2**16 - 2**15\n return motor_commands",
"def four_wheel_drive(x, y, heading, speed, length, steering_angle, gas, brake, gas_to_acc=1, brake_to_acc=1):\n\n return x, y, heading, speed",
"def compute_torques(self, caller):\n if caller == 'pose':\n self.pose_call = True\n if caller == 'vel':\n self.vel_call = True\n #If both vels and poses has called compute torques\n if self.pose_call and self.vel_call:\n #Reset checkers\n self.pose_call = False\n self.vel_call = False\n #Vels and poses\n # print \"Heard:\"\n # print \" \".join(str(n) for n in self.joints_vels)\n # print \" \".join(str(n) for n in self.joints_poses)\n #Compute B g and C matrices\n array_vels = np.asarray(self.joints_vels)[np.newaxis].T\n array_poses = np.asarray(self.joints_poses)[np.newaxis].T\n # print(\"array_vels\")\n # print(array_vels[2:4])\n # print(\"array_poses\")\n # print(array_poses[2:4])\n err_vels = array_vels[1:4] - self.target_vel\n err_poses = array_poses[1:4] - self.target_pose\n print(\"velocity error:\")\n print(err_vels)\n print(\"position error:\")\n print(err_poses)\n B = np.matrix([[0.0040055721446399998476906034738931*cos(self.joints_poses[2] + self.joints_poses[3]) - 0.0013481452371199999142570291610355*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.011671172651879999466092491395841*cos(self.joints_poses[2]) + 0.0040085638208*cos(self.joints_poses[3]) - 0.0039281369187899997198368480111341*sin(self.joints_poses[2]) + 0.042812399753418998939427354098797,\\\n 0.0020027860723199999238453017369466*cos(self.joints_poses[2] + self.joints_poses[3]) - 0.00067407261855999995712851458051773*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0058355863259399997330462456979205*cos(self.joints_poses[2]) + 0.0040085638208*cos(self.joints_poses[3]) - 0.0019640684593949998599184240055671*sin(self.joints_poses[2]) + 0.01625959562072499985284632093574,\\\n 0.0020027860723199999238453017369466*cos(self.joints_poses[2] + self.joints_poses[3]) - 0.00067407261855999995712851458051773*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0020042819104*cos(self.joints_poses[3]) + 0.0026794854106086355903769417993171],\\\n [0.0020027860723199999238453017369466*cos(self.joints_poses[2] + self.joints_poses[3]) - 0.00067407261855999995712851458051773*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0058355863259399997330462456979205*cos(self.joints_poses[2]) + 0.0040085638208*cos(self.joints_poses[3]) - 0.0019640684593949998599184240055671*sin(self.joints_poses[2]) + 0.01625959562072499985284632093574,\\\n 0.0040085638208*cos(self.joints_poses[3]) + 0.01618298062072499985284632093574,\\\n 0.0020042819104*cos(self.joints_poses[3]) + 0.0026794854106086355903769417993171],\n [0.0020027860723199999238453017369466*cos(self.joints_poses[2] + self.joints_poses[3]) - 0.00067407261855999995712851458051773*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0020042819104*cos(self.joints_poses[3]) + 0.0026794854106086355903769417993171,\\\n 0.0020042819104*cos(self.joints_poses[3]) + 0.0026794854106086355903769417993171,\\\n 0.0026403112045896820614231443819367]])\n\n C = np.matrix([[- 0.176*self.joints_vels[3]*(0.0038299580599999997564120146620326*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.011379466319999999567302850778105*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0113879654*sin(self.joints_poses[3])) - 1.0*self.joints_vels[2]*(0.00067407261855999995712851458051773*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.0020027860723199999238453017369466*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0019640684593949998599184240055671*cos(self.joints_poses[2]) + 0.0058355863259399997330462456979205*sin(self.joints_poses[2])),\\\n - 
0.176*self.joints_vels[3]*(0.0038299580599999997564120146620326*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.011379466319999999567302850778105*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0113879654*sin(self.joints_poses[3])) - 1.0*self.joints_vels[1]*(0.00067407261855999995712851458051773*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.0020027860723199999238453017369466*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0019640684593949998599184240055671*cos(self.joints_poses[2]) + 0.0058355863259399997330462456979205*sin(self.joints_poses[2])) - 1.0*self.joints_vels[2]*(0.00067407261855999995712851458051773*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.0020027860723199999238453017369466*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0019640684593949998599184240055671*cos(self.joints_poses[2]) + 0.0058355863259399997330462456979205*sin(self.joints_poses[2])),\\\n -0.176*(self.joints_vels[1] + self.joints_vels[2] + self.joints_vels[3])*(0.0038299580599999997564120146620326*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.011379466319999999567302850778105*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0113879654*sin(self.joints_poses[3]))],\\\n [self.joints_vels[1]*(0.00067407261855999995712851458051773*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.0020027860723199999238453017369466*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0019640684593949998599184240055671*cos(self.joints_poses[2]) + 0.0058355863259399997330462456979205*sin(self.joints_poses[2])) - 0.0020042819104*self.joints_vels[3]*sin(self.joints_poses[3]),\\\n -0.0020042819104*self.joints_vels[3]*sin(self.joints_poses[3]),\\\n -0.0020042819104*sin(self.joints_poses[3])*(self.joints_vels[1] + self.joints_vels[2] + self.joints_vels[3])],\\\n [0.0020042819104*self.joints_vels[2]*sin(self.joints_poses[3]) + 0.176*self.joints_vels[1]*(0.0038299580599999997564120146620326*cos(self.joints_poses[2] + self.joints_poses[3]) + 0.011379466319999999567302850778105*sin(self.joints_poses[2] + self.joints_poses[3]) + 0.0113879654*sin(self.joints_poses[3])),\\\n 0.0020042819104*sin(self.joints_poses[3])*(self.joints_vels[1] + self.joints_vels[2]),0]])\n\n g = np.array([[0.69474494555999997358275432901564*cos(self.joints_poses[1]) + 0.21649055273999998623105089912144*sin(self.joints_poses[1]) + 0.40336448984999999688544018994207*cos(self.joints_poses[1])*cos(self.joints_poses[2]) - 0.40336448984999999688544018994207*sin(self.joints_poses[1])*sin(self.joints_poses[2]) + 0.1384355808*cos(self.joints_poses[1])*cos(self.joints_poses[2])*cos(self.joints_poses[3]) - 0.1384355808*cos(self.joints_poses[1])*sin(self.joints_poses[2])*sin(self.joints_poses[3]) - 0.1384355808*cos(self.joints_poses[2])*sin(self.joints_poses[1])*sin(self.joints_poses[3]) - 0.1384355808*cos(self.joints_poses[3])*sin(self.joints_poses[1])*sin(self.joints_poses[2])],\\\n [0.1384355808*cos(self.joints_poses[1] + self.joints_poses[2] + self.joints_poses[3]) + 0.40336448984999999688544018994207*cos(self.joints_poses[1] + self.joints_poses[2])],\\\n [ 0.1384355808*cos(self.joints_poses[1] + self.joints_poses[2] + self.joints_poses[3])]])\n #Compute control torque\n control_from_errors = self.target_acc -np.dot(self.KD, err_vels) - np.dot(self.KP, err_poses)\n print(\"Derivative contribution: \")\n print(np.dot(self.KD, err_vels))\n print(\"proportional contribution: \")\n print(np.dot(self.KP, err_poses))\n control_torque = np.dot(C, self.target_vel) + g + np.dot(B, control_from_errors)\n print(\"Torques: \")\n 
print(control_torque)\n #Create ROS message\n self.torques.layout.dim = [self.torques_layout]\n # self.torques.layout.dim.size = 6\n # self.torques.layout.dim.stride = 1\n self.torques.layout.data_offset = 0\n self.torques.data = [0.0, control_torque[0], control_torque[1], control_torque[2], 0.0, 0.0]\n self.torque_pub.publish(self.torques)",
"def _get_omega(self, vehicle_id):\n pos = self.positions[vehicle_id]\n omega = self.frenets[vehicle_id].get_omega(\n pos[0], pos[1], pos[2], pos[3])\n\n return omega",
"def get_right(self):\n return self.r_motor.get()",
"def _compute_pd_torques(\n self,\n desired_motor_angles: np.ndarray,\n kp: np.ndarray,\n desired_motor_velocities,\n kd: np.ndarray,\n ) -> Tuple[np.ndarray, np.ndarray]:\n motor_angles, motor_velocities = self.get_motor_states()\n motor_torques = -kp * (motor_angles - desired_motor_angles) - kd * (\n motor_velocities - desired_motor_velocities)\n\n return motor_torques",
"def motor_velocities(self):\n return np.asarray(self._robot_state.velocity)"
] | [
"0.70026714",
"0.67918426",
"0.6789097",
"0.67424893",
"0.6490259",
"0.6446426",
"0.6392001",
"0.6252369",
"0.615059",
"0.61473376",
"0.61432093",
"0.61259156",
"0.6105141",
"0.61044025",
"0.5916003",
"0.58762723",
"0.583563",
"0.5775201",
"0.57536286",
"0.5740155",
"0.5740155",
"0.5717284",
"0.562919",
"0.5581799",
"0.54670846",
"0.546448",
"0.5446465",
"0.54323626",
"0.54224896",
"0.54079396"
] | 0.8009619 | 0 |
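This row's document is likewise an empty stub; its comments outline the intended calculation: take the brake normal force, convert it to an axial force through a 17 degree angle, convert that to screw torque using the ball-screw thread pitch with friction taken into account, and compare the result against the motor's maximum and holding torques. The sketch below follows that outline using the common lead-screw relation T = F * lead / (2 * pi * efficiency); every numeric constant, the standalone function signatures, and the slip check are assumptions for illustration only, not values from the record.

```python
import math

# All constants below are illustrative assumptions; only the 17 degree angle is
# hinted at in the record's comments.
WEDGE_ANGLE_DEG = 17.0          # relates brake normal force to axial force on the screw
SCREW_LEAD_M = 0.005            # ball screw lead: metres of travel per revolution
SCREW_EFFICIENCY = 0.9          # rough ball screw efficiency, stands in for friction losses
MOTOR_MAX_TORQUE_NM = 1.2       # torque the stepper can deliver while moving
MOTOR_HOLDING_TORQUE_NM = 1.5   # holding torque, typically higher than the moving torque


def get_motor_load_torque(brake_normal_force_n: float) -> float:
    """Torque (N*m) that the brake load puts on the motor through the ball screw."""
    # 1. Brake normal force -> axial force along the screw, via the wedge angle.
    axial_force = brake_normal_force_n * math.tan(math.radians(WEDGE_ANGLE_DEG))
    # 2. Axial force -> required screw torque: T = F * lead / (2 * pi * efficiency).
    return axial_force * SCREW_LEAD_M / (2.0 * math.pi * SCREW_EFFICIENCY)


def motor_will_slip(brake_normal_force_n: float, moving: bool) -> bool:
    """Compare the load torque against the limit relevant to the motor's state."""
    limit = MOTOR_MAX_TORQUE_NM if moving else MOTOR_HOLDING_TORQUE_NM
    return get_motor_load_torque(brake_normal_force_n) > limit


if __name__ == "__main__":
    print(round(get_motor_load_torque(500.0), 3))   # load torque for a 500 N brake normal force
    print(motor_will_slip(500.0, moving=True))      # False: well under the assumed 1.2 N*m limit
```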
Get all my daily_schedule | def get_my_schedules():
schedules = DailyScheduleModel.get_daily_schedules_by_user(g.user.get('id'))
user_schedules = daily_schedule_schema.dump(schedules, many=True)
return custom_response(user_schedules, 200) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getSchedules(self) :\n return self.schedules",
"def get_schedules():\n path = config.get('schedule', 'paths', './schedule.json')\n with open(path) as schedule_file:\n return json.load(schedule_file)",
"def all_schedules(self):\n return self._all_schedules",
"def get_schedules():\n return json.dumps(calendar.get_schedules())",
"def get_schedules(self):\n return self.__schedules",
"def list_schedules(session, logger):\n for sched in session.query(Schedule).all():\n logger.info(\"- {}\".format(sched))",
"def get_schedule():\n startdate = '02/28/2020'\n enddate = '04/01/2020'\n return statsapi.schedule(start_date=startdate, end_date=enddate, team=134)",
"async def async_forecast_daily(self) -> list[Forecast]:\n return self._forecast_daily",
"def test_list_schedules(self):\n pass",
"def list_schedules(self):\n with self.get(\"/v3/schedule/list\") as res:\n code, body = res.status, res.read()\n if code != 200:\n self.raise_error(\"List schedules failed\", res, body)\n js = self.checked_json(body, [\"schedules\"])\n\n return [schedule_to_tuple(m) for m in js[\"schedules\"]]",
"def get(self):\n try:\n result = load_schedules_from_file()\n return result\n except Exception:\n logging.exception('Failed to get Celery Beat schedules!')\n raise",
"def all():\n schedule = Scheduler()\n schedule.committees()\n schedule.legislators()\n schedule.bills()",
"def schedule(self, schedule_id=None):\n q = Lesson.query_current(schedule_id)\n lessons = q.filter(Lesson.group_id == self.id).all()\n\n if len(lessons) == 0:\n return None\n\n days = {}\n for x in range(0,5):\n days[x] = []\n for lesson in lessons:\n days[lesson.day].append(lesson)\n schedule = []\n for day in days.values():\n schedule.append(self._process_schedule(day))\n\n return schedule",
"async def get_scheduler_entries(self):\n return await self.AD.sched.get_scheduler_entries()",
"def schedule(self, schedule_id=None, eager=True):\n q = Lesson.query_current(schedule_id)\n q = q.filter(Lesson.teacher_id == self.id)\n\n if eager:\n q = q.options(eagerload('group'), eagerload('group.year'))\n\n days = {}\n for x in range(0,5):\n days[x] = []\n for lesson in q.all():\n days[lesson.day].append(lesson)\n schedule = []\n for day in days.values():\n schedule.append(self._process_schedule(day))\n return schedule",
"def get_schedules(self) -> List[SwitcherV2Schedule]:\n return self._schedule_list",
"def _create_schedules(self):\n\n ''''''",
"def show_daySchedule(doc_user, date, logger):\n ret = []\n my_calendar = col_calendar.find_one({\"User\": doc_user[\"_id\"]})\n if my_calendar != None:\n ret = my_calendar[\"schedules\"]\n\n show_events = []\n if ret:\n for schedule in ret:\n if schedule[\"date\"] == date:\n show_events += schedule[\"event\"]\n logger.info('{}: show chosen date schedule list = {}'.format(date, show_events))\n\n return show_events",
"def get(self, controller, data, *args, **kwargs):\n scheduler = controller.get_scheduler()\n data = scheduler.get_entries()\n res = [i[1].info() for i in data]\n resp = {\n u'schedules':res,\n u'count':len(res)\n }\n return resp",
"def daily(self):\n url = f\"?function=TIME_SERIES_DAILY&{self.url_append}\"\n data = self.client.get(url=url).json()\n return self.__process__(data[f\"Time Series (Daily)\"], metadata=data[\"Meta Data\"], format=\"%Y-%m-%d\")",
"def list_schedules(profile=None, api_key=None):\n return salt.utils.pagerduty.list_items(\n \"schedules\", \"id\", __salt__[\"config.option\"](profile), api_key, opts=__opts__\n )",
"def spm_schedules(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"spm_schedules\"), kwargs)",
"def _get_day_attack_schedule(self):\n planer_args = self.planner_config[\"args\"]\n start_time = datetime.strptime(planer_args[\"min_time\"], \"%H:%M\").time()\n start_date = datetime.combine(datetime.today().date(), start_time)\n end_time = datetime.strptime(planer_args[\"max_time\"], \"%H:%M\").time()\n end_date = datetime.combine(datetime.today().date(), end_time)\n\n random.seed()\n attack_schedule = []\n for start, end in self._split_date_range(start_date, end_date, planer_args[\"times\"]):\n attack_schedule.append(random.uniform(start, end))\n\n return attack_schedule",
"def daily(self):\r\n return RecordsDaily(self)",
"def get_all_schedules(self, yearly_only=False):\n schedule_types = list(map(str.upper, self.getiddgroupdict()[\"Schedules\"]))\n if yearly_only:\n schedule_types = [\n \"Schedule:Year\".upper(),\n \"Schedule:Compact\".upper(),\n \"Schedule:Constant\".upper(),\n \"Schedule:File\".upper(),\n ]\n scheds = {}\n for sched_type in schedule_types:\n for sched in self.idfobjects[sched_type]:\n try:\n if sched.key.upper() in schedule_types:\n scheds[sched.Name.upper()] = sched\n except KeyError:\n pass\n return scheds",
"def extra_tasks_for_today(self):\n localtz = tzlocal()\n datetime_today = datetime.fromtimestamp(rospy.get_rostime().to_sec(), tz=localtz)\n day_today = datetime_today.strftime(\"%A\")\n date_today = datetime_today.date()\n rospy.loginfo('Looking for daily tasks for %s, %s' % (day_today, date_today))\n \n eight_forty_five= time(8,45, tzinfo=localtz)\n eleven_thirty= time(11,30, tzinfo=localtz)\n fourteen_thirty=time(14,30, tzinfo=localtz)\n seventeen_fifteen= time(17,15, tzinfo=localtz)\n past_bedtime = time(23,59, tzinfo=localtz)\n \n # day_end = seventeen_fifteen\n day_end = past_bedtime\n\n\n\n metric_wps=['WayPoint13', 'WayPoint18', 'WayPoint9','WayPoint11','WayPoint5','WayPoint3'] \n object_learn_wps=['WayPoint13', 'WayPoint18', 'WayPoint9', 'WayPoint11'] \n object_search_wps=['WayPoint1', 'WayPoint2', 'WayPoint3']\n door_wps=['WayPoint7', 'WayPoint4']\n \n morning_start = eight_forty_five\n morning_duration = delta_between(eleven_thirty, morning_start)\n \n lunch_start = eleven_thirty\n lunch_duration = delta_between(fourteen_thirty, lunch_start)\n\n afternoon_start = fourteen_thirty\n afternoon_duration = delta_between(day_end, afternoon_start)\n\n tasks = []\n \n #door checks at fixed times (to evaluate system ability to do stuff at corret times)\n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(10,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(13,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n task=create_door_check_task(door_wps[0])\n start_time=datetime.combine(date_today, time(16,30, tzinfo=localtz))\n end_time = start_time+timedelta(seconds=30)\n task.start_after=rospy.Time(unix_time(start_time))\n task.end_before=rospy.Time(unix_time(end_time))\n tasks.append(task)\n \n \n #random tasks\n for i in range(4):\n #morning\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n if i<3:\n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, morning_start, morning_duration)\n tasks.append(task)\n \n #lunch (less tasks because we want the robot mostly learning people tracks)\n if i<1:\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, lunch_start, lunch_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, lunch_start, 
lunch_duration)\n tasks.append(task)\n \n \n #afternoon\n task=create_metric_map_task(random.choice(metric_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_door_check_task(random.choice(door_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n if i<3:\n task=create_object_learn_task(random.choice(object_learn_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n \n task=create_object_search_task(random.choice(object_search_wps))\n self.set_random_task_time(task, date_today, afternoon_start, afternoon_duration)\n tasks.append(task)\n return tasks",
"def get(self):\n try:\n tasks = tasks_overdue(get_db())\n return list(map(task_to_dict, tasks))\n except ValueError:\n api.abort(422, \"Invalid Date\")",
"def get_crons(self) -> dict:\n uri = f\"{self.uri}/crons\"\n\n response = self.request(uri=uri)\n return response.json()",
"def get_item_daily(item_id, realm_index) -> list:\n\n # get item json and direct to daily\n item_daily = __get_item_json__(item_id, realm_index)[\n \"daily\"]\n\n return item_daily",
"def list_schedules(connection, fields=None, error_msg=None):\n\n response = connection.get(\n url=f'{connection.base_url}/api/schedules', params={'fields': fields}\n )\n if response.ok:\n # Fix for incorrect 'eventId' (expecting 'id')\n event_based_in_list = False\n response_json = response.json()\n for schedule in response_json['schedules']:\n if 'event' in schedule:\n schedule['event']['id'] = schedule['event'].pop('eventId')\n event_based_in_list = True\n if event_based_in_list:\n response.encoding, response._content = 'utf-8', json.dumps(\n response_json\n ).encode('utf-8')\n\n return response"
] | [
"0.7289857",
"0.71636593",
"0.7133032",
"0.71146274",
"0.70769674",
"0.6985168",
"0.6768955",
"0.66982865",
"0.6636281",
"0.6565948",
"0.65510726",
"0.6524397",
"0.64645636",
"0.6455582",
"0.6405482",
"0.63976073",
"0.63691497",
"0.6340968",
"0.6331203",
"0.6315706",
"0.6271787",
"0.6267425",
"0.6250557",
"0.62360215",
"0.62340003",
"0.6217908",
"0.6206875",
"0.6191964",
"0.6175593",
"0.6121328"
] | 0.75095713 | 0 |
Adds field to document contents. Field value can be a list, where each item is added separately (i.e., the field is multivalued). | def __add_to_contents(self, field_name, field_value, field_type):
if type(field_value) is list:
for fv in field_value:
self.__add_to_contents(field_name, fv, field_type)
else:
if len(field_value) > 0: # ignore empty fields
self.contents.append({'field_name': field_name,
'field_value': field_value,
'field_type': field_type}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __add_to_contents(self, field_name, field_value, field_type):\n self.contents.append({'field_name': field_name,\n 'field_value': field_value,\n 'field_type': field_type})",
"def append(dest, field, value):\n if isinstance(dest[field], list):\n dest[field].append(value)\n else:\n dest[field] = [dest[field], value]",
"def append(self, value):\n self.__field.validate_element(value)\n return list.append(self, value)",
"def add_field(self, field, field_data):\n self.extra_fields[field] = field_data",
"def add_field(self, name, value):\n if not isinstance(value, str):\n value = json.dumps(value, ensure_ascii=False)\n self.form_fields.append((name, value))\n return",
"def _add_fields(self, fields):\n for field in fields:\n self.add(field)",
"def append(self, value):\n self.__field.validate_element(value)\n return list.append(self, value)",
"def add_field(self, name, value):\n self.form_fields.append((name, value))\n return",
"def add_field(self, name, value):\n self.form_fields.append((name, value))\n return",
"def add_field(self, name, value):\n self.form_fields.append((name, value))\n return",
"def add_field(self, **kwargs):\n field = {\n 'name': kwargs.get('name'),\n 'value': kwargs.get('value'),\n 'inline': kwargs.get('inline', False)\n }\n\n self.fields.append(field)",
"def add(self, field_name, value, **kwargs):\n self.properties.update(kwargs)\n model = self.model.get_subclass_model(**self.properties)\n\n self.array_validate(field_name, value, model)\n return self.array_process(field_name, value, operation_type='add')",
"def lst_to_field(table, field, lst):\n if len(lst) == 0:\n message(\"No values to add to '{}'.\".format(field))\n elif field_exists(table, field): \n with arcpy.da.UpdateCursor(table, [field]) as cursor:\n # For row in cursor:\n for i, row in enumerate(cursor):\n row[0] = lst[i]\n cursor.updateRow(row)\n else:\n message(\"{} field not found in {}\".format(field, table))",
"def add_field(self, field):\n self.add_field_sig(FieldSignature.from_field(field))",
"def add_field(self, field):\n self.add_field_sig(FieldSignature.from_field(field))",
"def add_fields(self, *fields: Field):\n self.fields.extend(fields)",
"def __set__(self, instance, value):\n # Run process for the nested field type for each value in list\n instance._values[self.name] = [self.field.process(v) for v in value]",
"def add(self, name, value):\n assert isinstance(name, str)\n\n if isinstance(value, str):\n self.__getitem__(name).append(value)\n elif isinstance(value, Iterable):\n self.__getitem__(name).extend(value)\n elif isinstance(value, datetime):\n self.__getitem__(name).append(rfc1123_datetime_encode(value))\n else:\n self.__getitem__(name).append(str(value))",
"def add_list(path, value):\n print(uc.add_list(path, value))",
"def add_field(self, field_data):\n def_field = {'id':None,\n 'ref':None,\n 'posx':'0',\n 'posy':'0',\n 'size':'50',\n 'text_orientation':'H',\n 'visible':'V',\n 'text_align':'L',\n 'props':'CNN'\n }\n\n field = dict(list(def_field.items()) + list(field_data.items()))\n #field['id'] = str(len(self.fields))\n\n self.fields.append(field)\n return field",
"def addDataField(self, name, value):\n if name in self.__examples: \n raise ValueError(\"Field already exists: \" + name)\n \n self.__storeDataField(name, value)",
"def append(self, field):\r\n\r\n field = to_field(field)\r\n self._fields.append(field)\r\n self._field_dict[field.name] = field\r\n self._field_names.append(field.name)",
"def add_field(self, field_name, field):\n self.fields[field_name] = field",
"def add_new_item_field(*fields, **keywords):\n\n for field in fields:\n print \"Creating {0} custom field...\".format(field)\n doc = frappe.get_doc({\n \"doctype\": \"Custom Field\",\n \"dt\": \"Item\",\n \"fieldtype\": \"Data\",\n \"label\": field,\n \"insert_after\": keywords['insert_after']\n })\n doc.insert()\n\n print \"-----\"\n print \"Finished creating custom fields...\"\n print \"-----\"",
"def add_field(self, field_name):\n field = utils.get_field(field_name, self.model)\n if utils.is_one(field):\n self._one_fields.add(field_name)\n else:\n self._many_fields.add(field_name)",
"def serialize_field(field: str) -> Callable[[Dict], None]:\n\n def f(data: Dict):\n \"\"\"\n Serialize specific field of type list\n \"\"\"\n if field in data and isinstance(data[field], List):\n data[field] = \",\".join(data[field])\n\n return f",
"def add_field(self, parameters):\n field = Field(self.context)\n self.add_child(field)\n qry = ServiceOperationQuery(self, \"AddField\", None, parameters, \"parameters\", field)\n self.context.add_query(qry)\n return field",
"def add_field(self,\r\n fieldname,\r\n entrylist,\r\n check=False):\r\n if self.read_only:\r\n display.noteprint((alerts.ATTENTION,'CANNOT EXECUTE: READ ONLY'))\r\n return False\r\n\r\n for e_temp in entrylist:\r\n if str(e_temp) in self.default_dict['field'] and check:\r\n temp_query = alerts.CHANGE+BLANK+self.default_dict['field'][str(e_temp)]\\\r\n +BLANK+alerts.TO+BLANK+fieldname+QUESTIONMARK\r\n if input(temp_query) not in YESTERMS:\r\n self.default_dict['field'][str(e_temp)] = fieldname\r\n else:\r\n self.default_dict['field'][str(e_temp)] = fieldname\r\n self.dd_changed = True",
"def append_list(section, option, value):\n\tv = get_list(section, option, [])\n\tv.append(value)\n\treturn set_list(section, option, v)",
"def add_field(self, field_name, label, description, type, function=None):\n new_field = {\n \"label\": label,\n \"description\": description,\n \"type\": type,\n }\n if function is not None:\n new_field[\"source\"] = \"function\"\n self.fields[field_name] = function\n else:\n new_field[\"source\"] = \"system\"\n self.fields[field_name] = \"No value\"\n self.description[\"fields\"][\"values\"][field_name] = new_field\n\n # update MongoDB\n #self.mongo_client.cps2_project.objects.update_one(\n #{\"_id\": self.mongo_id},\n #{\"$set\": {\"fields.values.\" + field_name: new_field,\n #\"last_modified.value\": str(datetime.utcnow())}\n #}\n #)\n print(\"Added a new field called \\\"\" + field_name + \"\\\" and updated MongoDB.\")"
] | [
"0.7012308",
"0.6697444",
"0.63506097",
"0.62795776",
"0.6221397",
"0.60970956",
"0.6074363",
"0.6061657",
"0.6061657",
"0.6061657",
"0.5992678",
"0.59347403",
"0.5931987",
"0.59300864",
"0.59300864",
"0.58972836",
"0.58769643",
"0.5775113",
"0.57599974",
"0.57518005",
"0.5729448",
"0.5716128",
"0.55899644",
"0.55675846",
"0.5560286",
"0.55284667",
"0.5519592",
"0.5493113",
"0.5477239",
"0.54730177"
] | 0.7746347 | 0 |
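A short, self-contained usage sketch of the multivalued-field flattening that this row's document implements. The `Document` wrapper class and the single-underscore method name are assumed purely so the method can be exercised on its own; the logic inside is copied from the record.

```python
class Document:
    def __init__(self):
        self.contents = []

    def _add_to_contents(self, field_name, field_value, field_type):
        # Same logic as the record above: list values are flattened into one
        # entry per item, and empty values are ignored.
        if type(field_value) is list:
            for fv in field_value:
                self._add_to_contents(field_name, fv, field_type)
        else:
            if len(field_value) > 0:
                self.contents.append({'field_name': field_name,
                                      'field_value': field_value,
                                      'field_type': field_type})


doc = Document()
doc._add_to_contents("author", ["Ann", "Bo", ""], "text")
print(doc.contents)  # two entries ("Ann" and "Bo"); the empty string is dropped
```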
Test when geographic extent changes. | def test_log_update_geographic_extent(self):
log_count_init = LoggerHistory.objects.count()
original_geographic_extent = self.project.geographic_extent
self.project.geographic_extent = GEOSGeometry(
'{"type": "Polygon","coordinates":'
'[[[-0.505,51.682],[-0.53,51.327],'
'[0.225,51.323],[0.167,51.667],[-0.505,51.682]]]}')
self.project.save()
log = LoggerHistory.objects.last()
log_count = LoggerHistory.objects.count()
self.assertNotEqual(log.user, {
'id': str(self.user.id),
'display_name': self.user.display_name})
self.assertEqual(log.project, {
'id': str(self.project.id),
'name': self.project.name})
self.assertEqual(log.usergroup, None)
self.assertEqual(log.category, None)
self.assertEqual(log.field, None)
self.assertEqual(log.location, None)
self.assertEqual(log.observation, None)
self.assertEqual(log.comment, None)
self.assertEqual(log.subset, None)
self.assertEqual(log.action, {
'id': 'updated',
'class': 'Project',
'field': 'geographic_extent'})
self.assertEqual(log_count, log_count_init + 1)
history = self.project.history.get(pk=log.historical.get('id'))
self.assertEqual(history.id, self.project.id)
self.assertEqual(history.geographic_extent, original_geographic_extent) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_extent(self):\n if self.lower_left.x > self.upper_right.x:\n dlx = self.lower_left.x\n self.lower_left.x = self.upper_right.x\n self.upper_right.y = dlx\n\n if self.lower_left.y > self.upper_right.y:\n dly = self.lower_left.y\n self.lower_left.y = self.upper_right.y\n self.upper_right.y = dly",
"def test_updated_landusage():\n assert_cached_node(1001, (13.5, 47.5))\n landusage_1001 = query_row(db_conf, 'osm_landusages', -1001)\n # point not in polygon after update\n assert not landusage_1001['geometry'].intersects(merc_point(13.4, 47.5))",
"def is_map_updated(self):\r\n self.old_obs_len =0\r\n if len(self.obs_ls[0])!= self.old_obs_len:\r\n self.old_obs_len =len(self.obs_ls[0])\r\n return True\r\n return False",
"def test_update_zoom_rooms_location_structure(self):\n pass",
"def get_geographic_extent(self, project):\n if project.geographic_extent is not None:\n return json.loads(project.geographic_extent.json)\n else:\n return None",
"def get_extent(self):\n pass",
"def geoextent(self):\r\n return self.series_extent",
"def is_changed(self, new_grid):\n for row in range(self._grid_height):\n for col in range(self._grid_width):\n if self.get_tile(row,col) != new_grid[row][col]:\n return True\n return False",
"def extent(self):\n return self._extent",
"def extent(self):\n rx0 = gxapi.float_ref()\n ry0 = gxapi.float_ref()\n rz0 = gxapi.float_ref()\n rx1 = gxapi.float_ref()\n ry1 = gxapi.float_ref()\n rz1 = gxapi.float_ref()\n self.gxvox.get_area(rx0, ry0, rz0, rx1, ry1, rz1)\n if self.is_depth:\n return gxgm.Point2(((rx0.value, ry0.value, -rz1.value), (rx1.value, ry1.value, -rz0.value)))\n return gxgm.Point2(((rx0.value, ry0.value, rz0.value), (rx1.value, ry1.value, rz1.value)),\n self.coordinate_system)",
"def testgeotargets(self):\r\n dim_geotargets = self.data.geotargets.shape\r\n dim_geovalues = (len(self.data.geovalues[0]), )\r\n assert dim_geotargets == dim_geovalues",
"def needs_extent_refresh(self, product_name: str) -> bool:\n existing_product_summary = self.get_product_summary(product_name)\n if not existing_product_summary:\n # Never been summarised. So, yes!\n return True\n\n most_recent_change = self.find_most_recent_change(product_name)\n has_new_changes = most_recent_change and (\n most_recent_change > existing_product_summary.last_refresh_time\n )\n\n _LOG.debug(\n \"product.last_extent_changes\",\n product_name=product_name,\n last_refresh_time=existing_product_summary.last_refresh_time,\n most_recent_change=most_recent_change,\n has_new_changes=has_new_changes,\n )\n return has_new_changes",
"def test_raster_extrema(self):\n\n for rastername in ['Earthquake_Ground_Shaking_clip.tif',\n 'Population_2010_clip.tif',\n 'shakemap_padang_20090930.asc',\n 'population_padang_1.asc',\n 'population_padang_2.asc']:\n\n filename = '%s/%s' % (TESTDATA, rastername)\n R = read_layer(filename)\n\n # Check consistency of raster\n\n # Use numpy to establish the extrema instead of gdal\n minimum, maximum = R.get_extrema()\n\n # Check that arrays with NODATA value replaced by NaN's agree\n A = R.get_data(nan=False)\n B = R.get_data(nan=True)\n\n assert A.dtype == B.dtype\n assert numpy.nanmax(A - B) == 0\n assert numpy.nanmax(B - A) == 0\n assert numpy.nanmax(numpy.abs(A - B)) == 0\n\n # Check that extrema are OK\n assert numpy.allclose(maximum, numpy.max(A[:]))\n assert numpy.allclose(maximum, numpy.nanmax(B[:]))\n assert numpy.allclose(minimum, numpy.nanmin(B[:]))\n\n # Check that nodata can be replaced by 0.0\n C = R.get_data(nan=0.0)\n msg = '-9999 should have been replaced by 0.0 in %s' % rastername\n assert min(C.flat[:]) != -9999, msg",
"def testEditDistanceBoundaries(self):\n for profile1 in self.profiles:\n for profile2 in self.profiles:\n self.assertTrue(profile1.edit_distance(profile2) <= 1.0 and profile1.edit_distance(profile2) >= 0)",
"def testoptdone(self):\r\n assert self.data.optdone\r\n assert numpy.all(numpy.abs(self.data.geovalues[-1]) <= self.data.geotargets)",
"def testEditDistanceBoundaries(self):\r\n for profile1 in self.profiles:\r\n for profile2 in self.profiles:\r\n self.assertTrue(profile1.edit_distance(profile2) <= 1.0 and profile1.edit_distance(profile2) >= 0)",
"def test_is_mountain_in_range(self):\n self.assertTrue(self.user_location.is_mountain_in_range(self.mountain_one))\n self.assertFalse(self.user_location.is_mountain_in_range(self.mountain_two))",
"def test_measurement(eit_map):\n assert eit_map.measurement.value in [195, 171]",
"def isSetExtentUnits(self):\n return _libsbml.Model_isSetExtentUnits(self)",
"def test_xmax_set(self):\n\t\tdetails = self.watcher.analyze(layers=[17], xmax=-1)\n\t\tactual_alpha = details.alpha.to_numpy()[0]\n\t\texpected_alpha = 3.0\n\t\tself.assertAlmostEqual(actual_alpha,expected_alpha, delta=0.1 )",
"def test_update_zr_location_profile(self):\n pass",
"def get_extent(self):\n geot = self.geotransform()\n return (geot[0], geot[3] + self.YSize() * geot[5],\n geot[0] + self.XSize() * geot[1], geot[3])",
"def test_geometry_collection_get_bounds():\n geojson_data = {\n \"geometries\": [\n {\n \"coordinates\": [\n [\n [-1, 1],\n [0, 2],\n [-3, 4],\n [2, 0],\n ]\n ],\n \"type\": \"Polygon\",\n },\n ],\n \"type\": \"GeometryCollection\",\n }\n assert folium.GeoJson(geojson_data).get_bounds() == [[0, -3], [4, 2]]",
"def new_occupancies(map_new, map_old, occupancy_threshold):\n # Find the index of the old map origin in the new map\n origin_new = np.array((map_new.info.origin.position.x, map_new.info.origin.position.y))\n origin_old = np.array((map_old.info.origin.position.x, map_old.info.origin.position.y))\n origin_offset = origin_old - origin_new\n origin_indices = np.rint(origin_offset / map_new.info.resolution).astype(int)\n\n if np.any(origin_indices != 0) or \\\n map_new.info.height != map_old.info.height or \\\n map_new.info.width != map_old.info.width:\n # Pad the old map\n x_before = origin_indices[0]\n x_after = map_new.info.width - map_old.info.width - x_before\n y_before = origin_indices[1]\n y_after = map_new.info.height - map_old.info.height - y_before\n paddings = ((np.maximum(0, y_before),\n np.maximum(0, y_after)),\n (np.maximum(0, x_before),\n np.maximum(0, x_after)))\n map_old.data = np.pad(map_old.data, paddings, 'constant', constant_values=-1)\n\n # Clip the old map\n x_clip_before = np.maximum(0, -x_before)\n x_clip_after = map_new.info.width + x_clip_before\n y_clip_before = np.maximum(0, -y_before)\n y_clip_after = map_new.info.height + y_clip_before\n map_old.data = map_old.data[y_clip_before:y_clip_after, x_clip_before:x_clip_after]\n\n # Find points that have changed to occupied\n points = np.argwhere(np.logical_and(\n map_new.data >= occupancy_threshold, \n map_old.data < occupancy_threshold))\n points = np.fliplr(points)\n points = points * map_new.info.resolution\n points[:,0] += map_new.info.origin.position.x\n points[:,1] += map_new.info.origin.position.y\n\n return points",
"def test_coord_preceding_fs(self):",
"def testDirtyRefresh(self):\n \n pass",
"def _is_geo_valid(lat, lng):\n if abs(lat) > 90 or abs(lng) > 180:\n return False\n return True",
"def test_polygon_with_duplicate_nodes_is_valid():\n geom = query_row(db_conf, 'osm_landusages', 30005)['geometry']\n assert geom.is_valid\n assert len(geom.exterior.coords) == 4",
"def test_if_fires_on_zone_leave(self):\n self.hass.states.set('geo_location.entity', 'hello', {\n 'latitude': 32.880586,\n 'longitude': -117.237564,\n 'source': 'test_source'\n })\n self.hass.block_till_done()\n\n assert setup_component(self.hass, automation.DOMAIN, {\n automation.DOMAIN: {\n 'trigger': {\n 'platform': 'geo_location',\n 'source': 'test_source',\n 'zone': 'zone.test',\n 'event': 'leave',\n },\n 'action': {\n 'service': 'test.automation',\n }\n }\n })\n\n self.hass.states.set('geo_location.entity', 'hello', {\n 'latitude': 32.881011,\n 'longitude': -117.234758,\n 'source': 'test_source'\n })\n self.hass.block_till_done()\n\n self.assertEqual(1, len(self.calls))",
"def map_extent(input_raster):\n\n gdal.UseExceptions()\n raster = gdal.Open(input_raster)\n raster_geotransform = raster.GetGeoTransform()\n raster_extent = (raster_geotransform[0],\n raster_geotransform[0]\n + raster.RasterXSize * raster_geotransform[1],\n raster_geotransform[3]\n + raster.RasterYSize * raster_geotransform[5],\n raster_geotransform[3])\n\n return raster_extent"
] | [
"0.5910318",
"0.5763293",
"0.556814",
"0.5436953",
"0.5405711",
"0.53909826",
"0.53579795",
"0.53464943",
"0.5327532",
"0.53197294",
"0.52521455",
"0.5181637",
"0.5171861",
"0.51245767",
"0.5123446",
"0.51189536",
"0.50938094",
"0.5072819",
"0.50627923",
"0.5045673",
"0.5040845",
"0.5033261",
"0.50269926",
"0.5021896",
"0.5011904",
"0.50042695",
"0.49883035",
"0.4987735",
"0.4986151",
"0.49849498"
] | 0.6644449 | 0 |
Test when multiple model fields change. | def test_log_update_multiple_fields(self):
log_count_init = LoggerHistory.objects.count()
original_isprivate = self.project.isprivate
original_islocked = self.project.islocked
self.project.isprivate = False
self.project.islocked = True
self.project.save()
log_count = LoggerHistory.objects.count()
self.assertEqual(log_count, log_count_init + 2)
logs = LoggerHistory.objects.all().order_by('-pk')[:2]
# 1st changed field
self.assertNotEqual(logs[1].user, {
'id': str(self.user.id),
'display_name': self.user.display_name})
self.assertEqual(logs[1].project, {
'id': str(self.project.id),
'name': self.project.name})
self.assertEqual(logs[1].category, None)
self.assertEqual(logs[1].field, None)
self.assertEqual(logs[1].action, {
'id': 'updated',
'class': 'Project',
'field': 'isprivate',
'value': str(self.project.isprivate)})
history_2 = self.project.history.get(pk=logs[1].historical.get('id'))
self.assertEqual(history_2.id, self.project.id)
self.assertEqual(history_2.isprivate, original_isprivate)
self.assertEqual(history_2.islocked, original_islocked)
# 2nd changed field
self.assertNotEqual(logs[0].user, {
'id': str(self.user.id),
'display_name': self.user.display_name})
self.assertEqual(logs[0].project, {
'id': str(self.project.id),
'name': self.project.name})
self.assertEqual(logs[0].category, None)
self.assertEqual(logs[0].field, None)
self.assertEqual(logs[0].action, {
'id': 'updated',
'class': 'Project',
'field': 'islocked',
'value': str(self.project.islocked)})
history_1 = self.project.history.get(pk=logs[0].historical.get('id'))
self.assertEqual(history_1.id, self.project.id)
self.assertEqual(history_1.isprivate, original_isprivate)
self.assertEqual(history_1.islocked, original_islocked)
# History entry is only one per save
self.assertEqual(history_1, history_2) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_fields_updated_with_computed(self):\n pass",
"def _validate_fields(self, change_fields):\n pass",
"def _field_was_changed(self):\n field_map = self._field_map\n for field in field_map.itervalues():\n if field.was_changed():\n return True\n return False",
"def test_update_multiple(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 14}, age=12)\n assert n_updated == 2\n items = list(test_store.get_by())\n\n andy.age = pandy.age = 14\n assert andy in items\n assert pandy in items\n assert candy in items",
"def test_update_case(self):\n pass",
"def test_update_one(self):\n pass",
"def test_SameNumberOfFields(self):\n pass",
"def compare_fields(command, old_object, new_object, fields, update=False):\n differences = []\n for field in fields:\n if not compare_values_for_equality(getattr(old_object, field),\n getattr(new_object, field)):\n differences.append('{} previously recorded as {}, '\n 'now {}'\n .format(field,\n str(getattr(old_object, field)),\n str(getattr(new_object, field))))\n if update:\n setattr(old_object, field, getattr(new_object, field))\n old_object.save()\n\n if differences:\n command.stderr.write(\n 'WARNING: Record {} had these changes: {}\\n'\n .format(str(new_object), str([d for d in differences])))\n\n if update:\n command.stderr.write(\n '\\tThe database was updated to reflect the changes\\n\\n')\n else:\n command.stderr.write(\n '\\tThe database was NOT updated to reflect the changes\\n\\n')\n return False\n\n else:\n return True",
"def test_update_list_changes_data(qtbot):\n # Given\n model = SourcesModel()\n assert model.rowCount() == 0\n\n sources = []\n source = Source(\"I001\", \"Test\", \"Person\", \"Pub\", \"Abbr\")\n sources.append(source)\n\n # When\n with qtbot.waitSignals([model.modelAboutToBeReset, model.modelReset]):\n model.update_list(sources)\n\n # Then\n assert model.rowCount() == 1",
"def isModelDirty(self):\n \n pass",
"def test_update_attribute_data(self):\n pass",
"def _changeable_fields(self, request, obj):\n return not obj or not self.is_readonly(request, obj)",
"def _update_allowed_fields(self) -> list:\n raise NotImplementedError('Each model has to have its list of update allowed fields')",
"def test_update_all(test_store, andy, pandy, candy):\n n_updated = test_store.update(fields={\"age\": 13})\n assert n_updated == 3\n items = list(test_store.get_by())\n\n andy.age = pandy.age = 13\n assert andy in items\n assert pandy in items\n assert candy in items",
"def testOnParamsChanged(self, widget):\n spy_signal = QtSignalSpy(widget, widget.modelModified)\n\n # number of rows. default=1\n assert widget.tblParams.rowCount() == 1\n\n # number of columns. default=2\n assert widget.tblParams.columnCount() == 2\n\n # Change the param\n new_param = \"shoot\"\n widget.tblParams.setItem(0,0,QTableWidgetItem(new_param))\n #widget.tblParams.editingFinished.emit()\n\n\n # Check the signal\n assert spy_signal.count() == 1\n\n # model dict updated\n assert widget.model['parameters'] == {0: (new_param, None)}\n\n # Change the value\n new_value = \"BOOM\"\n widget.tblParams.setItem(0,1,QTableWidgetItem(new_value))\n\n # Check the signal\n assert spy_signal.count() == 2\n\n # model dict updated\n assert widget.model['parameters'] == {0: (new_param, new_value)}\n\n # See that the number of rows increased\n assert widget.tblParams.rowCount() == 2",
"def partial_change(self):\n return self.attempted_change() and not all(self._get_field_data())",
"def mark_field_changed(self, name):\n self.assert_known_field(name)\n assert not self._deleted # Once a model is deleted, we don't expect attributes to change.\n self._changed_fields.add(name)",
"def test_publish_records_fields(self):\n draft = self._get_draft()\n review_request = draft.review_request\n\n old_summary = review_request.summary\n old_description = review_request.description\n old_testing_done = review_request.testing_done\n old_branch = review_request.branch\n old_bugs = review_request.get_bug_list()\n\n draft.summary = 'New summary'\n draft.description = 'New description'\n draft.testing_done = 'New testing done'\n draft.branch = 'New branch'\n draft.bugs_closed = '12, 34, 56'\n draft.target_people.add(review_request.submitter)\n\n new_bugs = draft.get_bug_list()\n\n changes = draft.publish()\n fields = changes.fields_changed\n\n self.assertIn('summary', fields)\n self.assertIn('description', fields)\n self.assertIn('testing_done', fields)\n self.assertIn('branch', fields)\n self.assertIn('bugs_closed', fields)\n\n old_bugs_norm = set([(bug,) for bug in old_bugs])\n new_bugs_norm = set([(bug,) for bug in new_bugs])\n\n self.assertEqual(fields['summary']['old'][0], old_summary)\n self.assertEqual(fields['summary']['new'][0], draft.summary)\n self.assertEqual(fields['description']['old'][0], old_description)\n self.assertEqual(fields['description']['new'][0], draft.description)\n self.assertEqual(fields['testing_done']['old'][0], old_testing_done)\n self.assertEqual(fields['testing_done']['new'][0], draft.testing_done)\n self.assertEqual(fields['branch']['old'][0], old_branch)\n self.assertEqual(fields['branch']['new'][0], draft.branch)\n self.assertEqual(set(fields['bugs_closed']['old']), old_bugs_norm)\n self.assertEqual(set(fields['bugs_closed']['new']), new_bugs_norm)\n self.assertEqual(set(fields['bugs_closed']['removed']), old_bugs_norm)\n self.assertEqual(set(fields['bugs_closed']['added']), new_bugs_norm)",
"def test_update_record(self):\n pass",
"def test_update_values(self):\n m0 = TestUpdateModel.create(count=5, text='monkey')\n\n # independently save over a new count value, unknown to original instance\n m1 = TestUpdateModel.get(partition=m0.partition, cluster=m0.cluster)\n m1.count = 6\n m1.save()\n\n # update the text, and call update\n m0.update(text='monkey land')\n self.assertEqual(m0.text, 'monkey land')\n\n # database should reflect both updates\n m2 = TestUpdateModel.get(partition=m0.partition, cluster=m0.cluster)\n self.assertEqual(m2.count, m1.count)\n self.assertEqual(m2.text, m0.text)",
"def test_update_values(self):\r\n m0 = TestUpdateModel.create(count=5, text='monkey')\r\n\r\n # independently save over a new count value, unknown to original instance\r\n m1 = TestUpdateModel.get(partition=m0.partition, cluster=m0.cluster)\r\n m1.count = 6\r\n m1.save()\r\n\r\n # update the text, and call update\r\n m0.update(text='monkey land')\r\n self.assertEqual(m0.text, 'monkey land')\r\n\r\n # database should reflect both updates\r\n m2 = TestUpdateModel.get(partition=m0.partition, cluster=m0.cluster)\r\n self.assertEqual(m2.count, m1.count)\r\n self.assertEqual(m2.text, m0.text)",
"def test_teams_partial_update(self):\n pass",
"def test_prep_field_properties(self):\n original_data = self.form.data\n test_data = original_data.copy()\n # modify values in data\n test_data._mutable = False\n self.form.data = test_data\n original_fields = self.form.fields\n test_fields = original_fields.copy()\n # modify fields\n self.form.fields = test_fields\n test_fields_info = {name: field.__dict__.copy() for name, field in test_fields.items()}\n original_get_overrides = self.form.get_overrides\n def skip_overrides(): return {}\n self.form.get_overrides = skip_overrides\n original_alt_field_info = getattr(self.form, 'alt_field_info', None)\n self.form.alt_field_info = self.alt_field_info\n self.form.test_condition_response = True\n expected_fields_info = test_fields_info.copy()\n result_fields = self.form.prep_fields()\n result_fields_info = {name: field.__dict__.copy() for name, field in result_fields.items()}\n modified_info = self.alt_field_info['alt_test_feature']\n first_label = modified_info['first']['label']\n first_initial = modified_info['first']['initial']\n last_initial = modified_info['last']['initial']\n for name, opts in modified_info.items():\n expected_fields_info[name].update(opts)\n\n self.assertEqual(first_label, result_fields['first'].label)\n self.assertEqual(first_initial, result_fields['first'].initial)\n self.assertEqual(last_initial, result_fields['last'].initial)\n for key, val in expected_fields_info.items():\n self.assertEqual(val, result_fields_info[key])\n self.assertDictEqual(expected_fields_info, result_fields_info)\n\n self.form.test_condition_response = False\n self.form.alt_field_info = original_alt_field_info\n if original_alt_field_info is None:\n del self.form.alt_field_info\n self.form.fields = original_fields\n self.form.data = original_data\n self.form.get_overrides = original_get_overrides",
"def test_updating_multiple_records_through_filter_with_arg_value(self, test_domain):\n identifier1 = uuid4()\n identifier2 = uuid4()\n identifier3 = uuid4()\n identifier4 = uuid4()\n test_domain.repository_for(Person)._dao.create(\n id=identifier1, first_name=\"Athos\", last_name=\"Musketeer\", age=2\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier2, first_name=\"Porthos\", last_name=\"Musketeer\", age=3\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier3, first_name=\"Aramis\", last_name=\"Musketeer\", age=4\n )\n test_domain.repository_for(Person)._dao.create(\n id=identifier4, first_name=\"dArtagnan\", last_name=\"Musketeer\", age=5\n )\n\n # Perform update\n updated_count = (\n test_domain.repository_for(Person)\n ._dao.query.filter(age__gt=3)\n .update_all({\"last_name\": \"Fraud\"})\n )\n\n # Query and check if only the relevant records have been updated\n assert updated_count == 2\n\n u_person1 = test_domain.repository_for(Person)._dao.get(identifier1)\n u_person2 = test_domain.repository_for(Person)._dao.get(identifier2)\n u_person3 = test_domain.repository_for(Person)._dao.get(identifier3)\n u_person4 = test_domain.repository_for(Person)._dao.get(identifier4)\n assert u_person1.last_name == \"Musketeer\"\n assert u_person2.last_name == \"Musketeer\"\n assert u_person3.last_name == \"Fraud\"\n assert u_person4.last_name == \"Fraud\"",
"def test_all_field_dependencies_model(self, all_field_dependencies):\n # Loop over the dependencies\n for dependency in all_field_dependencies:\n api_keys = dependency.keys()\n # Check if the number of keys is the same between the model and the API\n assert len(self.model_keys) == len(api_keys)\n # Check of the keys and types of values are the same between the model and the API\n for key in self.model_keys:\n assert key in api_keys\n assert type(dependency[key]) in field_dep_model[key]",
"def test_update_other_fields(auth_client):\n account_ids = prep_database(auth_client.sqla)\n\n # For each of the accounts, grab the current value of the \"other\" fields.\n expected_by_id = {}\n for account_id in account_ids:\n current_account = auth_client.sqla.query(Account).filter_by(id=account_id).first()\n expected_by_id[account_id] = {\n 'username': current_account.username,\n 'active': current_account.active\n }\n\n for account_id in account_ids:\n payload = {}\n\n if flip():\n # Randomly update the username.\n new_username = username_factory()\n expected_by_id[account_id]['username'] = new_username\n payload['username'] = new_username\n if flip():\n # Randomly update the active flag.\n new_active = flip()\n expected_by_id[account_id]['active'] = new_active\n payload['active'] = new_active\n\n # At this point, we'll have constructed a payload that might have zero of more\n # of the fields. This lets us test various combinations of update requests.\n # The expected_by_id dictionary stores the values we expect to see in the database,\n # whether the original value retrieve earlier or the newly updated on just\n # created.\n\n # It's possible that none of the fields will have been selected for update,\n # which doesn't make much sense, but we'll still test for that possibility.\n\n resp = auth_client.patch(url_for('people.update_account', account_id=account_id), json=payload)\n assert resp.status_code == 200\n\n for account_id in account_ids:\n updated_account = auth_client.sqla.query(Account).filter_by(id=account_id).first()\n assert updated_account is not None\n assert updated_account.username == expected_by_id[account_id]['username']\n assert updated_account.active == expected_by_id[account_id]['active']",
"def test_changedata(self):\n p = model.Person(firstname=\"Tobias\", lastname=\"Thelen\",\n email=\"[email protected]\", hobbies=[\"singen\",\"springen\",\"fröhlichsein\"])\n id = p.store()\n\n p = model.Person(id=id)\n p['firstname'] = \"Walter\"\n p.store()\n\n p2 = model.Person(id=id)\n self.assertEqual(p2.firstname, \"Walter\")\n self.assertEqual(p2.lastname, \"Thelen\")",
"def field_changes(self, field_changes):\n\n self._field_changes = field_changes",
"def test_update(self):\n # this is tested graphically, as it is UI\n pass",
"def testDirtyRefresh(self):\n \n pass"
] | [
"0.69400656",
"0.6714582",
"0.64557713",
"0.62088406",
"0.6202799",
"0.61854094",
"0.6156362",
"0.61393243",
"0.6132215",
"0.61121684",
"0.6104089",
"0.60721403",
"0.6061349",
"0.6043818",
"0.6033619",
"0.6031339",
"0.6025014",
"0.60222876",
"0.6019186",
"0.60170436",
"0.6014297",
"0.60115135",
"0.6005045",
"0.6003034",
"0.59969485",
"0.59554356",
"0.59303117",
"0.59296644",
"0.5927785",
"0.5914261"
] | 0.693425 | 1 |
kwargs: Additional keyword arguments are copied and the copy is passed up to AbstractSimplexParameterType; see documentation for that class for details | def __init__(self, **kwargs):
kwc=kwargs.copy()
AbstractSimplexParameterType.__init__(self, **kwc) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractSimplexParameterType.__init__(self, **kwc)\n\n self._template_attrs['fill_value'] = False\n\n self._gen_template_attrs()",
"def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractSimplexParameterType.__init__(self, **kwc)\n\n self._template_attrs['fill_value'] = ''\n\n self._gen_template_attrs()",
"def _copy_kwargs(self, **kwargs):\n ns = self.__dict__\n for attr, kw in {'_engine': 'engine', '_format': 'format'}.items():\n assert kw not in kwargs\n if attr in ns:\n kwargs[kw] = ns[attr]\n return super()._copy_kwargs(**kwargs)",
"def _store_interpolator_kwargs(self, **kwargs):\n self._interpolator_kwargs = copy.deepcopy(kwargs)",
"def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractComplexParameterType.__init__(self, value_class='VectorValue', **kwc)\n\n self._gen_template_attrs()",
"def set_parameters(self, **kwargs):\n self.__multi_layer_perceptron.set_params(**kwargs)",
"def kwargs (self):\n return copy.deepcopy (self._kwargs)",
"def set_params(self, **kwargs):\n ...",
"def __init__(self, **kwargs):\n self.__kwargs = kwargs",
"def set_params(self, **kwargs) -> NoReturn:\n pass",
"def copy(self, *args, **kwargs): # real signature unknown\n pass",
"def copy(self, *args, **kwargs): # real signature unknown\n pass",
"def copy(self, *args, **kwargs): # real signature unknown\n pass",
"def copy(self, *args, **kwargs): # real signature unknown\n pass",
"def copy(self, *args, **kwargs): # real signature unknown\n pass",
"def copy(self, *args, **kwargs): # real signature unknown\n pass",
"def copy(self, *args, **kwargs): # real signature unknown\n pass",
"def params(self, **kwargs):\n return kwargs",
"def __copy__(self, *args, **kwargs): # real signature unknown\n pass",
"def __copy__(self, *args, **kwargs): # real signature unknown\n pass",
"def set_parameters(self, **kwargs):\n\n invalid_params = set(self.parameter_names).difference(kwargs.keys())\n if invalid_params:\n raise ValueError(\n \"unknown parameters: {}\".format(\", \".join(invalid_params))) \n \n for parameter_name, value in kwargs.items():\n setattr(self, \"_{}\".format(parameter_name), value)\n\n return kwargs",
"def set_params(self,**kwargs):\n for key in kwargs:\n setattr(self, key, kwargs[key])",
"def kwargs(kwargs):\n run_kwargs(kwargs)",
"def add_kwargs():\n pass",
"def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractComplexParameterType.__init__(self, value_class='RecordValue', **kwc)\n\n self._gen_template_attrs()",
"def set_params(self, *arg):\n pass",
"def set_kwargs(self, kwargs):\n accept = {\"command\" : None,\n \"clicked\" : False,\n \"unclick\" : False,\n \"active\" : True,\n \"key_bindings\" : []}\n for kwarg in kwargs:\n if kwarg in accept:\n accept[kwarg] = kwargs[kwarg]\n self.__dict__.update(accept)",
"def __init__(self, **kwargs: Any):",
"def __init__(**params):",
"def __init__( self, **kwargs ):\n self.__dict__.update( kwargs )"
] | [
"0.69261605",
"0.6647647",
"0.6435878",
"0.6260152",
"0.61408424",
"0.6130079",
"0.6130027",
"0.6129116",
"0.6115548",
"0.6056017",
"0.60557586",
"0.60557586",
"0.60557586",
"0.60557586",
"0.60557586",
"0.60557586",
"0.60557586",
"0.6039004",
"0.59231293",
"0.59231293",
"0.58545136",
"0.578876",
"0.577496",
"0.5768341",
"0.57515794",
"0.5748329",
"0.5736669",
"0.57356095",
"0.5733957",
"0.5729463"
] | 0.80070865 | 1 |
ParameterType for Quantities (float, int, etc). value_encoding: The intrinsic type of the Quantity. kwargs: Additional keyword arguments are copied and the copy is passed up to AbstractSimplexParameterType; see documentation for that class for details | def __init__(self, value_encoding=None, uom=None, constraint=None, **kwargs):
kwc=kwargs.copy()
AbstractSimplexParameterType.__init__(self, value_class='NumericValue', **kwc)
if value_encoding is None:
self._value_encoding = np.dtype('float32').str
else:
try:
dt = np.dtype(value_encoding)
if dt.isbuiltin not in (0,1):
raise TypeError('\'value_encoding\' must be a valid numpy dtype: {0}'.format(value_encoding))
if dt in UNSUPPORTED_DTYPES:
raise TypeError('\'value_encoding\' {0} is not supported by H5py: UNSUPPORTED types ==> {1}'.format(value_encoding, UNSUPPORTED_DTYPES))
self._value_encoding = dt.str
except TypeError:
raise
self._template_attrs['uom'] = uom or 'unspecified'
self._template_attrs['constraint'] = constraint
self._gen_template_attrs() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, base_type=None, **kwargs):\n kwc=kwargs.copy()\n AbstractComplexParameterType.__init__(self, value_class='FunctionValue', **kwc)\n if base_type is not None and not isinstance(base_type, QuantityType):\n raise TypeError('\\'base_type\\' must be an instance of QuantityType')\n\n self.base_type = base_type or QuantityType()\n\n self._template_attrs.update(self.base_type._template_attrs)\n\n# self._template_attrs['value_encoding'] = '|O8'\n self._template_attrs['fill_value'] = None\n\n self._gen_template_attrs()",
"def __init__(self, base_type=None, **kwargs):\n kwc=kwargs.copy()\n AbstractComplexParameterType.__init__(self, value_class='ConstantValue', **kwc)\n if base_type is not None and not isinstance(base_type, QuantityType):\n raise TypeError('\\'base_type\\' must be an instance of QuantityType')\n\n self.base_type = base_type or QuantityType()\n\n self._template_attrs.update(self.base_type._template_attrs)\n# self._template_attrs['value_encoding'] = '|O8'\n self._template_attrs['fill_value'] = None\n\n self._gen_template_attrs()",
"def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractSimplexParameterType.__init__(self, **kwc)",
"def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractSimplexParameterType.__init__(self, **kwc)",
"def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractSimplexParameterType.__init__(self, **kwc)",
"def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractSimplexParameterType.__init__(self, **kwc)",
"def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractSimplexParameterType.__init__(self, **kwc)",
"def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractSimplexParameterType.__init__(self, **kwc)",
"def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractSimplexParameterType.__init__(self, **kwc)",
"def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractParameterType.__init__(self, **kwc)\n\n self.value_encoding = np.dtype(object).str\n self._template_attrs['fill_value'] = None",
"def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractSimplexParameterType.__init__(self, **kwc)\n\n self._template_attrs['fill_value'] = False\n\n self._gen_template_attrs()",
"def __init__(self, categories, key_value_encoding=None, key_fill_value=None, **kwargs):\n kwc=kwargs.copy()\n AbstractComplexParameterType.__init__(self, value_class='CategoryValue', **kwc)\n\n if not isinstance(categories, dict) or len(categories.keys()) == 0:\n raise TypeError('\\'categories\\' must be of type dict and cannot be empty: {0}'.format(categories))\n\n if key_value_encoding is None:\n # Get the type of the first key\n key_value_encoding = np.asanyarray(categories.keys()[0]).dtype.str\n else:\n key_value_encoding = np.dtype(key_value_encoding).str\n\n want_kind=np.dtype(key_value_encoding).kind\n if want_kind not in self.SUPPORTED_CATETEGORY_KEY_KINDS:\n raise TypeError('\\'key_value_encoding\\' is not supported; supported np.dtype.kinds: {0}'.format(self.SUPPORTED_CATETEGORY_KEY_KINDS))\n\n for k in categories.keys():\n if np.asanyarray(k).dtype.kind != want_kind:\n raise ValueError('A key in \\'categories\\' ({0}) does not match the specified \\'key_value_encoding\\' ({1})'.format(k, key_value_encoding))\n\n if want_kind == 'S':\n self.base_type = ArrayType()\n else:\n self.base_type = QuantityType(value_encoding=key_value_encoding)\n\n self._template_attrs['categories'] = categories\n self._gen_template_attrs()",
"def __init__(self, quality=None, nilValues=None, **kwargs):\n kwc=kwargs.copy()\n AbstractParameterType.__init__(self, **kwc)\n self._template_attrs['quality'] = quality\n self._template_attrs['nilValues'] = nilValues\n self._template_attrs['fill_value'] = -9999",
"def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractSimplexParameterType.__init__(self, **kwc)\n\n self._template_attrs['fill_value'] = ''\n\n self._gen_template_attrs()",
"def _quantize_dequantize_params(self, inputs=None) -> Dict[str, torch.Tensor]:\n quantized_params = dict()\n shadow_params = {name: param.detach().clone() for name, param in self.named_parameters(recurse=False)}\n\n if self._mode != QcQuantizeOpMode.LEARN_ENCODINGS:\n quantizer_param_map = defaultdict(list)\n for name, param in self.named_parameters(recurse=False):\n quantizer_param_map[self._param_quantizers[name]].append((name, param))\n\n for param_quantizer, param_list in quantizer_param_map.items():\n # If we are in training mode with quant-sim nodes,\n # then we want to calculate encodings for the parameters in every pass\n if self.training or param_quantizer.encoding is None:\n param_quantizer.reset_encoding_stats()\n for _, param in param_list:\n param_quantizer.update_encoding_stats(param.data)\n param_quantizer.compute_encoding()\n\n for name, param in param_list:\n quantized_params[name] = param.data = self._param_quantize_dequantize(param.clone(), param_quantizer)\n else:\n encoding_list_for_params = []\n for name, _ in self.get_named_parameters():\n # Create a list of encoding parameters for params\n quantizer = self.param_quantizers[name]\n if quantizer.enabled:\n # if param uses a group quantizer remap to group quantizer min/max encoding params.\n if quantizer in self._grouped_quantizers.values():\n name, *_ = [n for n, q in self._grouped_quantizers.items() if q == quantizer]\n encoding_list_for_params.append(getattr(self, name + '_encoding_min'))\n encoding_list_for_params.append(getattr(self, name + '_encoding_max'))\n\n # Quantize the parameters\n inputs = ParameterQuantizer.apply(inputs, self, *encoding_list_for_params)\n\n # clone() the outputs of Custom function to avoid incorrect gradient calculation for in-place modification\n # of view (view is created since Custom function's forward return input as-is)\n inputs = inputs.clone()\n quantized_params = {name: param.clone() for name, param in self.named_parameters(recurse=False)\n if '_encoding_' not in name}\n\n yield quantized_params, inputs\n\n for name, param in self.named_parameters(recurse=False):\n if name in shadow_params:\n param.data.copy_(shadow_params[name].data)",
"def quantize(self, some_tensor, bits=32) -> dict:\n raise NotImplementedError(\"To be Overidden by derived class\")",
"def _quant_unimplemented(self, *input: Any) -> None:\n raise NotImplementedError",
"def SBO_isQuantitativeParameter(*args):\n return _libsbml.SBO_isQuantitativeParameter(*args)",
"def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractComplexParameterType.__init__(self, value_class='VectorValue', **kwc)\n\n self._gen_template_attrs()",
"def isQuantitativeParameter(*args):\n return _libsbml.SBO_isQuantitativeParameter(*args)",
"def quantizer(self, nbits: int):\n raise NotImplementedError",
"def __init__(self):\n super(ASYMMETRIC, self).__init__(quant_type=Constants.QZ_ASYMMETRIC)",
"def quantize_scope(*args):\n quantization_objects = {\n 'QuantizeAwareActivation':\n vitis_quantize_aware_activation.QuantizeAwareActivation,\n 'NoQuantizeActivation':\n vitis_quantize_aware_activation.NoQuantizeActivation,\n 'QuantizeWrapper':\n vitis_quantize_wrapper.QuantizeWrapper,\n 'CustomOpWrapper':\n vitis_custom_wrapper.CustomOpWrapper,\n }\n quantization_objects.update(vitis_quantizers._types_dict())\n quantization_objects.update(vitis_quantize_configs._types_dict())\n quantization_objects.update(vitis_quantize_layer._types_dict())\n quantization_objects.update(vitis_activation._types_dict())\n quantization_objects.update(vitis_pooling._types_dict())\n\n return tf.keras.utils.custom_object_scope(*(args + (quantization_objects,)))",
"def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractComplexParameterType.__init__(self, value_class='ArrayValue', **kwc)\n\n self._gen_template_attrs()",
"def getInputSpecification(cls):\n ## This will replace the lines above\n inSpec= super(ValueDuration, cls).getInputSpecification()\n inSpec.addSub(InputData.parameterInputFactory('target',\n contentType=InputTypes.StringListType,\n strictMode=True))\n inSpec.addSub(InputData.parameterInputFactory('bins',\n contentType=InputTypes.IntegerType))\n return inSpec",
"def quantifyValues( values, scale):\r\n\r\n\tfor v in range(len(values)):\r\n\t\tvalues[v] = st.quantify( values[v], scale)\r\n\treturn values",
"def getInputSpecification(cls):\n inputSpecification = super(Weibull, cls).getInputSpecification()\n inputSpecification.addSub(InputData.parameterInputFactory(\"low\", contentType=InputTypes.FloatType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"k\", contentType=InputTypes.FloatType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"lambda\", contentType=InputTypes.FloatType))\n\n return inputSpecification",
"def getInputSpecification(cls):\n inputSpecification = super(Exponential, cls).getInputSpecification()\n inputSpecification.addSub(InputData.parameterInputFactory(\"low\", contentType=InputTypes.FloatType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"lambda\", contentType=InputTypes.FloatType))\n\n return inputSpecification",
"def getInputSpecification(cls):\n inputSpecification = super(Binomial, cls).getInputSpecification()\n inputSpecification.addSub(InputData.parameterInputFactory(\"n\", contentType=InputTypes.IntegerType))\n inputSpecification.addSub(InputData.parameterInputFactory(\"p\", contentType=InputTypes.FloatType))\n\n return inputSpecification",
"def __init__(__self__, *,\n quantity: Optional[float] = None,\n unit: Optional[str] = None):\n if quantity is not None:\n pulumi.set(__self__, \"quantity\", quantity)\n if unit is not None:\n pulumi.set(__self__, \"unit\", unit)"
] | [
"0.6628984",
"0.64694905",
"0.63277125",
"0.63277125",
"0.63277125",
"0.63277125",
"0.63277125",
"0.63277125",
"0.63277125",
"0.6248839",
"0.58616287",
"0.5837818",
"0.58370155",
"0.5768371",
"0.5614411",
"0.5582864",
"0.5551495",
"0.55346626",
"0.551115",
"0.55017114",
"0.5469695",
"0.5403598",
"0.5289223",
"0.5262847",
"0.52282274",
"0.51581603",
"0.5122425",
"0.5026396",
"0.5016699",
"0.50111043"
] | 0.71359324 | 0 |
kwargs: Additional keyword arguments are copied and the copy is passed up to AbstractSimplexParameterType; see documentation for that class for details | def __init__(self, **kwargs):
kwc=kwargs.copy()
AbstractSimplexParameterType.__init__(self, **kwc) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractSimplexParameterType.__init__(self, **kwc)\n\n self._template_attrs['fill_value'] = False\n\n self._gen_template_attrs()",
"def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractSimplexParameterType.__init__(self, **kwc)\n\n self._template_attrs['fill_value'] = ''\n\n self._gen_template_attrs()",
"def _copy_kwargs(self, **kwargs):\n ns = self.__dict__\n for attr, kw in {'_engine': 'engine', '_format': 'format'}.items():\n assert kw not in kwargs\n if attr in ns:\n kwargs[kw] = ns[attr]\n return super()._copy_kwargs(**kwargs)",
"def _store_interpolator_kwargs(self, **kwargs):\n self._interpolator_kwargs = copy.deepcopy(kwargs)",
"def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractComplexParameterType.__init__(self, value_class='VectorValue', **kwc)\n\n self._gen_template_attrs()",
"def set_parameters(self, **kwargs):\n self.__multi_layer_perceptron.set_params(**kwargs)",
"def kwargs (self):\n return copy.deepcopy (self._kwargs)",
"def set_params(self, **kwargs):\n ...",
"def __init__(self, **kwargs):\n self.__kwargs = kwargs",
"def set_params(self, **kwargs) -> NoReturn:\n pass",
"def copy(self, *args, **kwargs): # real signature unknown\n pass",
"def copy(self, *args, **kwargs): # real signature unknown\n pass",
"def copy(self, *args, **kwargs): # real signature unknown\n pass",
"def copy(self, *args, **kwargs): # real signature unknown\n pass",
"def copy(self, *args, **kwargs): # real signature unknown\n pass",
"def copy(self, *args, **kwargs): # real signature unknown\n pass",
"def copy(self, *args, **kwargs): # real signature unknown\n pass",
"def params(self, **kwargs):\n return kwargs",
"def __copy__(self, *args, **kwargs): # real signature unknown\n pass",
"def __copy__(self, *args, **kwargs): # real signature unknown\n pass",
"def set_parameters(self, **kwargs):\n\n invalid_params = set(self.parameter_names).difference(kwargs.keys())\n if invalid_params:\n raise ValueError(\n \"unknown parameters: {}\".format(\", \".join(invalid_params))) \n \n for parameter_name, value in kwargs.items():\n setattr(self, \"_{}\".format(parameter_name), value)\n\n return kwargs",
"def set_params(self,**kwargs):\n for key in kwargs:\n setattr(self, key, kwargs[key])",
"def kwargs(kwargs):\n run_kwargs(kwargs)",
"def add_kwargs():\n pass",
"def __init__(self, **kwargs):\n kwc=kwargs.copy()\n AbstractComplexParameterType.__init__(self, value_class='RecordValue', **kwc)\n\n self._gen_template_attrs()",
"def set_params(self, *arg):\n pass",
"def set_kwargs(self, kwargs):\n accept = {\"command\" : None,\n \"clicked\" : False,\n \"unclick\" : False,\n \"active\" : True,\n \"key_bindings\" : []}\n for kwarg in kwargs:\n if kwarg in accept:\n accept[kwarg] = kwargs[kwarg]\n self.__dict__.update(accept)",
"def __init__(self, **kwargs: Any):",
"def __init__(**params):",
"def __init__( self, **kwargs ):\n self.__dict__.update( kwargs )"
] | [
"0.6926334",
"0.66476643",
"0.64352614",
"0.6260937",
"0.61402684",
"0.613057",
"0.6130397",
"0.61296386",
"0.6116005",
"0.60568196",
"0.6056564",
"0.6056564",
"0.6056564",
"0.6056564",
"0.6056564",
"0.6056564",
"0.6056564",
"0.6040087",
"0.592367",
"0.592367",
"0.5854392",
"0.5789069",
"0.5775133",
"0.57695",
"0.5750284",
"0.5748876",
"0.5737203",
"0.57369834",
"0.5734285",
"0.57305765"
] | 0.8007624 | 0 |
Given an order, applies the prevailing tax rules to the order's credit attribute. Then it returns | def apply_tax(order_obj):
tax_rule = taxes.get()
all_credits = order_obj.credits
other_credit = filter(lambda x: x["coll_name"] != taxes.TaxRule.coll_name(), all_credits)
if tax_rule is not None:
order_obj.credits = other_credit + [{
"obj_id": tax_rule._id,
"coll_name": taxes.TaxRule.coll_name(),
"amount": taxes.amount(tax_rule, order_obj),
}]
else:
order_obj.credits = other_credit | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_order(self, order):\n order.order_id = self.order_id\n order.average_price = self.avg_execution_price\n order.symbol = self.symbol\n order.side = self.side\n order.type = self.order_type\n order.amount = self.original_amount\n order.price = self.price\n order.filled = self.executed_amount\n order.remaining = self.remaining_amount\n if self.is_cancelled:\n order.status = exchanges.Order.Status.CANCELLED\n elif self.is_live:\n order.status = exchanges.Order.Status.OPEN\n else:\n order.status = exchanges.Order.Status.CLOSED\n return order",
"def fix_order_price(order: Order):\n if not order.price:\n LOG.warning('Price of order %s was None', order.id)\n fix = get_closed_order()\n if fix.id == order.id and fix.price:\n order.price = fix.price\n del fix\n return order",
"def _compute_tax(self):\n for line in self:\n line.tax = (line.amount_untaxed * 14) / 100",
"def modify_order(self, order, price, size):\r\n request_params = {\r\n \"price\": str(price),\r\n \"size\": str(size)\r\n }\r\n\r\n method = self.private_endpoints['modify_order']['method']\r\n url = self.base_url + self.private_endpoints['modify_order']['url'].format(orderId=order)\r\n req = requests.request(method, url, headers=self.get_auth_headers(nonce=True), json=request_params)\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return True\r\n else:\r\n return res",
"def modify_price(self, price):\n if price is not None and self.is_cancellable:\n log.info(\"bo#%s: modify price (pending) order \" % self.ticket)\n not_implemented_error(\"Can't modify price for now (only for pending orders which wasn't triggered\")\n order_id = self.order_id_master\n cancel_order(order_id) # DANGEROUS! it should be atomic operation!\n #style = self.style\n #if self.is_limit:\n #elif self.is_stop:\n #elif self.is_stop_limit\n #order_id = order(self.symbol, self.volume, style=new_style))\n \n else:\n return",
"def tax(bill):\n bill *= 1.08\n print \"With tax: %f\" % bill\n return bill",
"def tax(bill):\n bill *= 1.08\n print \"With tax: %f\" % bill\n return bill",
"def prepare_order(acct, order):\n myaddr = (acct.address).lower()\n order[\"makerAddress\"] = myaddr\n order_struct = jsdict_order_to_struct(order) \n sig = _sign_order(acct, order_struct)\n order_struct[\"signature\"] = sig\n js_order = order_to_jsdict(order_struct)\n js_order[\"exchangeAddress\"] = exchangeAddress\n return js_order",
"def tax(bill):\r\n bill *= 1.08\r\n print(\"With tax: %f\" % bill)\r\n return bill",
"def update_orders(comp, order, user_correct, payment_id):\n users_orders = []\n for item in order.items.all():\n users_orders.append(item.id)\n item.is_paid = True\n item.save()\n order.related_competition = comp\n order.payment_id = payment_id\n order.order_date = timezone.now()\n order.answer_correct = user_correct\n order.ordered = True\n order.save()\n return order",
"def add_tax(self,tax):\n return self.price + (self.price * tax)",
"def tax(bill):\n bill *= 1.08\n print \"With tax: %.2f\" % bill\n return bill",
"def update_on_save(sender, instance, created, **kwargs):\n instance.order.update_grand_total()",
"def _amount_all(self):\n for order in self:\n amount_untaxed = amount_tax = 0.0\n order_amount_total = 0.0\n for line in order.order_line:\n amount_untaxed += line.price_subtotal\n amount_tax += line.price_tax\n self_amount_total = amount_untaxed + amount_tax\n if not order.discount_fixed_percent:\n order_amount_total = self_amount_total\n if order.discount_fixed_percent == 'Percent':\n order_amount_total = self_amount_total * (1 - (order.discount or 0.0) / 100.0)\n if order.discount_fixed_percent == 'Fixed':\n order_amount_total = self_amount_total - order.discount_value\n order.update({\n 'amount_untaxed': order.pricelist_id.currency_id.round(amount_untaxed),\n 'amount_tax': order.pricelist_id.currency_id.round(amount_tax),\n 'amount_before_disc': amount_untaxed + amount_tax,\n 'amount_total': order_amount_total,\n })",
"def replace_order(access_token,order_ID,json_request):\r\n orders_url = 'https://api.tdameritrade.com/v1/accounts/{}/orders/{}'.format(TDAuth_Info.account_num,order_ID)\r\n\r\n #The header for placing in order needs to define the input type (json)\r\n headers = {'Authorization':'Bearer {}'.format(access_token),\r\n 'Content-Type':'application/json'}\r\n\r\n #Post the order on TD Ameritrade and check the response\r\n replace_order_response=requests.put(url=orders_url,headers=headers,json=json_request)\r\n\r\n return replace_order_response",
"def _amount_all(self):\r\n for order in self:\r\n amount_untaxed = amount_tax = amount_discount = timbre = 0.0\r\n for line in order.order_line:\r\n amount_untaxed += line.price_subtotal\r\n if line.product_id.timbre_fiscal:\r\n amount_tax += line.price_tax - 0.60\r\n timbre = 0.60\r\n else :\r\n amount_tax += line.price_tax\r\n amount_discount += (line.product_uom_qty * line.price_unit * line.discount)/100\r\n order.update({\r\n 'amount_untaxed': order.pricelist_id.currency_id.round(amount_untaxed),\r\n 'amount_tax': order.pricelist_id.currency_id.round(amount_tax),\r\n 'amount_discount': order.pricelist_id.currency_id.round(amount_discount),\r\n 'price_total_no_discount': amount_untaxed + amount_discount,\r\n 'timbre': timbre,\r\n 'amount_total': amount_untaxed + amount_tax + timbre,\r\n })",
"def calculate(self, order):\n pass",
"def shopping_cart_modify_order_post(self, order_no, order_type, sales_channel_no, owner_email, owner_name,\n owner_gender, owner_profile_id, owner_mobile, owner_birthday, owner_city,\n owner_district, owner_zipcode, owner_address, licensing_location, licensing_location_id,\n license_type, payment_type, is_payment_change, document_fee_payer, scooter_model,\n agency_fees=-1, is_receive_award=-1, token=None, hash_id=None):\n self.init.auth(token, hash_id)\n\n products = [\n documentation_fee\n ]\n products += self._get_scooter_information_payload(scooter_model)\n payments = self._get_loan_payment(payment_type)\n\n data = {\n \"Bag\": 1,\n \"UserId\": None,\n \"OrderNo\": order_no,\n \"OrderType\": order_type,\n \"SalesChannelNo\": sales_channel_no,\n \"SalesChannelName\": None,\n \"SalesStoreNo\": None,\n \"SalesStoreName\": None,\n \"DiscountPlanId\": None,\n \"EmployeeNo\": None,\n \"CheckEmployeeNo\": None,\n \"Recommended\": None,\n \"Remark\": None,\n \"Invoice\": {\n \"Numbers\": None,\n \"GUINumbers\": None,\n \"Donation\": None,\n \"DonationNumbers\": None,\n \"Date\": None\n },\n \"Discount\": [],\n \"ProjectActivity\": [\n {\n \"ProjectCode\": \"EVT217\",\n \"ProjectName\": \"購買 Gogoro 全車系贈送 Gogoro 極輕都會風雨衣\",\n \"ProjectDescription\": \"購買 Gogoro 全車系贈送 Gogoro 極輕都會風雨衣\",\n \"Remark\": None,\n \"Product\": [],\n \"Delivery\": {}\n }\n ],\n \"Buyer\": {\n \"Email\": None,\n \"Name\": None,\n \"Gender\": -1,\n \"Mobile\": None,\n \"City\": None,\n \"CityId\": None,\n \"District\": None,\n \"DistrictId\": None,\n \"ZipCode\": None,\n \"Address\": None\n },\n \"Owner\": {\n \"Email\": owner_email,\n \"Name\": owner_name,\n \"Gender\": owner_gender,\n \"IDCard\": owner_profile_id,\n \"Mobile\": owner_mobile,\n \"Birthday\": owner_birthday,\n \"City\": owner_city,\n \"CityId\": None,\n \"District\": owner_district,\n \"DistrictId\": None,\n \"ZipCode\": owner_zipcode,\n \"Address\": owner_address,\n \"CivilServants\": 0,\n \"PurchaseMethod\": 0\n },\n \"Driver\": {\n \"Name\": owner_name,\n \"Email\": owner_email\n },\n \"Deliveries\": {},\n \"DeliveryConditions\": {\n \"Type\": -1,\n \"StoreId\": None\n },\n \"Scooter\": {\n \"PDIStoreId\": None,\n \"PairingAccount\": None,\n \"PairingUserId\": None,\n \"SingingDay\": None,\n \"SubsidyType\": -1,\n \"SubsidyApplication\": -1,\n \"SubsidyCounty\": None,\n \"BeDeprecatedSamePerson\": -1,\n \"Subsidy\": {\n \"TesSubsidyTypeId\": 1,\n \"LocalSubsidyTypeId\": 2003,\n \"IsTES\": True,\n \"IsEPA\": True,\n \"IsEPB\": True\n },\n \"Licensing\": {\n \"LicensingStatus\": 1,\n \"LicensingLocation\": licensing_location,\n \"LicensingLocationId\": licensing_location_id,\n \"InsuredYears\": 2,\n \"Stamp\": 1,\n \"ChooseLicensing\": 0,\n \"ChooseRemark\": None,\n \"AgencyFees\": agency_fees\n },\n \"LicenseType\": license_type\n },\n \"Payment\": {\n \"Type\": payment_type,\n \"IsPaymentChange\": is_payment_change,\n \"cash\": {},\n \"loan\": payments\n },\n \"Contract\": {\n \"InvoicesGUINumbers\": None,\n \"InvoicesTitle\": None\n },\n \"Recommend\": {},\n \"IsReceiveAward\": is_receive_award,\n \"DocumentFeePayer\": document_fee_payer,\n \"ProductList\": [\n {\n \"Id\": \"00000000-0000-0000-0000-000000000000\",\n \"Name\": \"\",\n \"Type\": 0,\n \"Descriptions\": None,\n \"Product\":\n products\n }\n ]\n }\n print(data)\n resp = self.init.request('post', '/shopping-cart/modify-order', json=data)\n return resp",
"def mark_refunded(self):\n order = self.clone()\n order.status = Order.STATUS_REFUNDED\n order.save()\n return order",
"def action_update_total(self):\n for order in self:\n amount_untaxed = 0.0\n for line in order.order_line_ids:\n amount_untaxed += line.price_subtotal\n order.price_subtotal = amount_untaxed",
"def fill_order(self, order: Order) -> None:\n order = self.get_order_by_id(order.id)\n order.status = OrderStatus.FILL",
"def prep(self, order):\n update = {}\n for col in list(set(self.numeric + self.non_numeric + self.currencies + self.columns)):\n try:\n if col in self.numeric:\n value = float(order[col])\n else:\n value = order[col]\n update[col] = value\n except:\n update[col] = 0.0\n continue\n update = pd.Series(update).fillna(0)\n update['currency_on_hold'] = order['product_id'][-3:] if order['side'] == 'buy' else order['product_id'][:3]\n update['create_time'] = pd.to_datetime(order['time'])\n update['update_time'] = pd.to_datetime(order['time'])\n update['time'] = update.update_time.to_datetime64().astype('int64')//1e9\n update['status'] = order['type']\n update['order_type'] = 'unknown' if not update['order_type'] else update['order_type']\n return update#pd.Series(update).fillna(0)",
"def _calculate_total_order_price(self, actual_order_price: int):\n actual_order_price = actual_order_price if actual_order_price else 0\n total_additional_charges = self.total_additional_charges\n self.total_order_price = actual_order_price + total_additional_charges",
"def tax(self):\n\n self.x = self.a\n self.set_zn(self.x)",
"def _amount_all(self):\n for order in self:\n order.update({\n 'net_rate': order.basic_rate + order.extra_rate\n })",
"def pay_tax(self):\n\t\t# the money comes from nowhere, settlers seem to have an infinite amount of money.\n\t\t# see http://wiki.unknown-horizons.org/index.php/DD/Economy/Settler_taxing\n\t\thappiness_tax_modifier = (float(self.happiness)-50)/200 + 1\n\t\ttaxes = self.tax_base * happiness_tax_modifier * self.inhabitants * self.settlement.tax_setting\n\t\ttaxes = int(round(taxes))\n\t\tself.settlement.owner.inventory.alter(RES.GOLD_ID, taxes)\n\t\tself.last_tax_payed = taxes\n\n\t\t# decrease happiness\n\t\thappiness_decrease = taxes + self.tax_base + ((self.settlement.tax_setting-1)*10)\n\t\thappiness_decrease = int(round(happiness_decrease))\n\t\tself.inventory.alter(RES.HAPPINESS_ID, -happiness_decrease)\n\t\tself._changed()\n\t\tself.log.debug(\"%s: pays %s taxes, -happy: %s new happiness: %s\", self, taxes, \\\n\t\t\t\t\t\t\t\t\t happiness_decrease, self.happiness)",
"def make_payment(self, payment):\n self._balance -= payment",
"def confirm_payment(self, order, amount, transaction_id, backend, description, save=True):\n #! TODO this bit should probably be in the \"if save...\" block below. Check rest of code base first\n OrderPayment.objects.get_or_create(\n order=order,\n amount=Decimal(amount),\n transaction_id=transaction_id,\n backend=backend,\n description=description\n )\n\n if save and self.is_order_paid(order):\n if order.status < Order.PAID:\n # first time completing order. fire the purchase method for products to update inventory or whatever\n for item in order.items.all():\n item.product.purchase(item)\n item.save()\n self.send_payment_confirmation_email(order)\n # Set the order status:\n order.status = Order.PAID\n order.save()\n\n # empty the related cart\n try:\n cart = Cart.objects.get(pk=order.cart_pk)\n if cart.extra.get(\"promocode\",None):\n #! TODO: this is really inelegant maybe use a signal instead?\n from drop.discount.models import PromocodeUsage\n PromocodeUsage.objects.create(\n order=order,\n promocode_id=cart.extra[\"promocode\"]['id']\n )\n cart.empty()\n except Cart.DoesNotExist:\n pass\n\n order.cart_pk = None\n order.save()\n paid.send(sender=self, order=order)",
"def create_account_payment(self, order, user):\n access_token = get_random_string(20)\n domain = SysConfig.get_config('DOMAIN')\n\n with transaction.atomic():\n payment_txn = Transaction.objects.create(gateway=self.gateway,\n order=order,\n description='Transaction for order #%s' % order.id,\n status=Transaction.STATUS_PROCESSING,\n currency=order.currency.code,\n amount=order.charge_amount,\n updated_by=unicode(user),\n created_by=unicode(user))\n payment_txn.add_param('access_token', access_token, user)\n payment_txn.save()\n\n try:\n payment = {\n 'intent': 'sale',\n 'redirect_urls': {\n 'return_url': 'http://%s%s' % (domain, reverse('payments_process_account_success',\n args=[payment_txn.id, access_token])),\n 'cancel_url': 'http://%s%s' % (domain, reverse('payments_process_account_cancel',\n args=[payment_txn.id, access_token])),\n },\n 'payer': {\n 'payment_method': 'paypal',\n },\n 'transactions': [{\n 'item_list': {\n 'items': [{\n 'name': item.product.name,\n 'sku': item.product.name,\n 'price': _exchange_amount(item.price, order.exchange_rate),\n 'currency': order.currency.code,\n 'quantity': item.quantity\n } for item in order.items.all()]\n },\n 'amount': {\n 'total': unicode(order.charge_amount),\n 'currency': order.currency.code,\n 'details': {\n 'subtotal': _exchange_amount(order.sub_total, order.exchange_rate),\n 'tax': _exchange_amount(order.taxes, order.exchange_rate),\n 'shipping': _exchange_amount(order.shipping_cost, order.exchange_rate)\n }\n },\n 'description': 'Payment for order #%s' % (order.id)\n }],\n }\n\n logger.info('Processing PayPal account.', extra=payment)\n payment = paypalrestsdk.Payment(payment, api=self.api)\n payment_created = payment.create()\n except Exception as e:\n logger.error('Failed to process PayPal account (transaction_id: %s)' % payment_txn.id)\n logger.exception(e)\n\n raise DoorstepError('We failed to process your PayPal account at the moment, please try again later!')\n\n if payment_created:\n with transaction.atomic():\n payment_txn.add_param('id', unicode(payment.id), user)\n payment_txn.add_param('create_time', unicode(payment.create_time), user)\n payment_txn.add_param('update_time', unicode(payment.update_time), user)\n payment_txn.add_param('state', unicode(payment.state), user)\n payment_txn.add_param('intent', unicode(payment.intent), user)\n payment_txn.add_param('payment_method', unicode(payment.payer.payment_method), user)\n payment_txn.save()\n\n for link in payment.links:\n if link.rel == 'approval_url' and link.method == 'REDIRECT':\n return link.href\n\n payment_txn.status = Transaction.STATUS_FAILED\n payment_txn.error_message = payment.error['message']\n payment_txn.save()\n\n raise DoorstepError('We failed to process your PayPal account at the moment, please try again later!')",
"def credit_card_payment(self, card, order, user):\n with transaction.atomic():\n payment_txn = Transaction.objects.create(gateway=self.gateway,\n order=order,\n description='Transaction for order #%s' % order.id,\n status=Transaction.STATUS_PROCESSING,\n currency=order.currency.code,\n amount=order.charge_amount,\n updated_by=unicode(user),\n created_by=unicode(user))\n try:\n charge = stripe.Charge.create(\n amount=int(order.charge_amount * 100), # 100 cents to charge $1.00\n currency=order.currency.code.lower(),\n description='Payment for order #%s' % (order.id),\n card={\n 'number': card['number'],\n 'name': card['name'],\n 'exp_month': card['expire_month'],\n 'exp_year': card['expire_year'],\n 'cvc': card['cvv2']\n })\n\n with transaction.atomic():\n # Saving only few necessary fields for refunding\n payment_txn.status = Transaction.STATUS_APPROVED\n payment_txn.add_param('id', unicode(charge.id), user)\n payment_txn.add_param('created', unicode(charge.created), user)\n payment_txn.add_param('amount', unicode(charge.amount), user)\n payment_txn.add_param('card_id', unicode(charge.card.id), user)\n payment_txn.add_param('card_last4', unicode(charge.card.last4), user)\n payment_txn.add_param('card_country', unicode(charge.card.country), user)\n payment_txn.add_param('card_brand', unicode(charge.card.brand), user)\n payment_txn.save()\n\n order.payment_status = Order.PAYMENT_PAID\n order.updated_by = unicode(user)\n order.save()\n\n except stripe.error.CardError as e:\n # The card has been declined\n body = e.json_body\n error = body['error']\n logger.warning('Credit Card has been declined (transaction_id: %s)' % payment_txn.id, extra=error)\n\n payment_txn.status = Transaction.STATUS_FAILED\n payment_txn.error_message = error['message']\n payment_txn.save()\n\n raise DoorstepError(error['message'])\n except Exception as e:\n logger.error('Failed to process Credit Card (transaction_id: %s)' % payment_txn.id)\n logger.exception(e)\n\n raise DoorstepError('We failed to process your Credit Card at the moment, please try again later!')"
] | [
"0.61190176",
"0.5793169",
"0.560548",
"0.5537112",
"0.5497742",
"0.54355586",
"0.54355586",
"0.5407957",
"0.54025096",
"0.5350767",
"0.5334678",
"0.5318417",
"0.5301097",
"0.529851",
"0.5259051",
"0.52478224",
"0.52440584",
"0.5229148",
"0.5221458",
"0.5206095",
"0.5195599",
"0.5190608",
"0.5188909",
"0.5165317",
"0.5146614",
"0.5146019",
"0.5124139",
"0.5063214",
"0.5052435",
"0.5015851"
] | 0.7710866 | 0 |
Given an order, applies the prevailing discount rules to the order's debit attribute | def apply_discounts(order_obj):
all_dedits = order_obj.debits
other_debit = filter(lambda x: x["coll_name"] != discounts.Discount.coll_name(), all_dedits)
all_discounts = discounts.get_all()
valid_discounts = []
for item_dic in order_obj.items:
for d in all_discounts:
item_obj = items.get(coerce_bson_id(item_dic["obj_id"]))
if item_obj is None: continue
if discounts.valid_on_item(d, item_obj):
valid_discounts += [{
"obj_id": d._id,
"coll_name": discounts.Discount.coll_name(),
"amount": discounts.discounted_value(item_obj.price, d),
}]
break
order_obj.debits = other_debit + valid_discounts
return valid_discounts | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_discount_rate(self, order_value, discount_rate):\r\n if float(discount_rate) == 0:\r\n self.discount_rate = float(RetailCustomer.discount_rate)\r\n else:\r\n self.discount_rate = float(discount_rate)",
"def apply_discount(self, product):\n pass",
"def redeem(self, instance, customer, save=True):\n start = timezone.now().date()\n end = start + relativedelta(months=self.duration)\n discount = Discount(instance=instance,\n coupon=self,\n start=start,\n end=end,\n customer=customer)\n discount.full_clean()\n if save:\n discount.save()\n return discount",
"def update_order(self, order):\n order.order_id = self.order_id\n order.average_price = self.avg_execution_price\n order.symbol = self.symbol\n order.side = self.side\n order.type = self.order_type\n order.amount = self.original_amount\n order.price = self.price\n order.filled = self.executed_amount\n order.remaining = self.remaining_amount\n if self.is_cancelled:\n order.status = exchanges.Order.Status.CANCELLED\n elif self.is_live:\n order.status = exchanges.Order.Status.OPEN\n else:\n order.status = exchanges.Order.Status.CLOSED\n return order",
"def update_order_undiscounted_price(apps, schema_editor):\n\n def on_migrations_complete(sender=None, **kwargs):\n order_ids = list(kwargs.get(\"updated_orders_pks\"))\n send_order_updated.delay(order_ids)\n\n Order = apps.get_model(\"order\", \"Order\")\n OrderLine = apps.get_model(\"order\", \"OrderLine\")\n\n # Take orders that has applied lines voucher discounts, but the discount is\n # not visible in undiscounted price.\n orders_to_update = Order.objects.filter(\n Exists(\n OrderLine.objects.filter(\n order_id=OuterRef(\"id\"), voucher_code__isnull=False\n )\n ),\n total_gross_amount=F(\"undiscounted_total_gross_amount\"),\n ).order_by(\"id\")\n\n updated_orders_pks = []\n for batch_pks in queryset_in_batches(orders_to_update):\n orders = Order.objects.filter(pk__in=batch_pks)\n lines = OrderLine.objects.filter(order_id__in=orders.values(\"id\")).values(\n \"order_id\",\n \"undiscounted_total_price_gross_amount\",\n \"total_price_gross_amount\",\n \"undiscounted_total_price_net_amount\",\n \"total_price_net_amount\",\n )\n lines_discount_data = defaultdict(lambda: (0, 0))\n for data in lines:\n discount_amount_gross = (\n data[\"undiscounted_total_price_gross_amount\"]\n - data[\"total_price_gross_amount\"]\n )\n discount_amount_net = (\n data[\"undiscounted_total_price_net_amount\"]\n - data[\"total_price_net_amount\"]\n )\n current_discount_gross, current_discount_net = lines_discount_data[\n data[\"order_id\"]\n ]\n lines_discount_data[data[\"order_id\"]] = (\n current_discount_gross + discount_amount_gross,\n current_discount_net + discount_amount_net,\n )\n\n for order in orders:\n discount_amount_gross, discount_amount_net = lines_discount_data.get(\n order.id\n )\n if discount_amount_gross > 0 or discount_amount_net > 0:\n order.undiscounted_total_gross_amount += discount_amount_gross\n order.undiscounted_total_net_amount += discount_amount_net\n\n updated_orders_pks.append(order.id)\n\n Order.objects.bulk_update(\n orders,\n [\n \"undiscounted_total_gross_amount\",\n \"undiscounted_total_net_amount\",\n ],\n )\n\n # If we updated any order we should trigger `order_updated` after migrations\n if updated_orders_pks:\n updated_orders_pks = set(updated_orders_pks)\n sender = registry.get_app_config(\"order\")\n post_migrate.connect(\n partial(on_migrations_complete, updated_orders_pks=updated_orders_pks),\n weak=False,\n dispatch_uid=\"send_order_updated\",\n sender=sender,\n )",
"def get_discount(self, price):\r\n pass",
"def discount(self, cart):",
"def fix_order_price(order: Order):\n if not order.price:\n LOG.warning('Price of order %s was None', order.id)\n fix = get_closed_order()\n if fix.id == order.id and fix.price:\n order.price = fix.price\n del fix\n return order",
"def bulk_item(order: Order) -> Decimal:\n discount = Decimal(0)\n for item in order.cart:\n if item.quantity >= 20:\n discount += item.total() * Decimal('0.1')\n return discount",
"def apply_tax(order_obj):\n tax_rule = taxes.get()\n all_credits = order_obj.credits\n other_credit = filter(lambda x: x[\"coll_name\"] != taxes.TaxRule.coll_name(), all_credits)\n\n if tax_rule is not None:\n order_obj.credits = other_credit + [{\n \"obj_id\": tax_rule._id,\n \"coll_name\": taxes.TaxRule.coll_name(),\n \"amount\": taxes.amount(tax_rule, order_obj),\n }]\n else:\n order_obj.credits = other_credit",
"def _prepare_inv_line(self, cr, uid, account_id, order_line, context=None):\n result = super(purchase_order, self)._prepare_inv_line(cr, uid, account_id, order_line, context=None)\n result['discount2'] = order_line.discount2 or 0.0\n return result",
"def bulk_item(order):\n discount = 0\n for item in order.cart:\n if item.quantity >= 20:\n discount += item.total() * .1\n return discount",
"def bulk_item_promo(order: Order):\n discount = 0\n for item in order.cart:\n if item.quantity >= 20:\n discount += item.total() * 0.1\n return discount",
"def discounted(self, discounted):\n\n self._discounted = discounted",
"def compute_amount_discounted(promotion, amount):\n if promotion.promo_type == '1': # % off\n amount_discounted = promotion.promo_amount * amount / Decimal(100)\n amount_discounted = Decimal(str(round(amount_discounted, 2)))\n elif promotion.promo_type == '2': # $ off\n if promotion.promo_amount < amount:\n amount_discounted = promotion.promo_amount\n else:\n amount_discounted = amount\n elif promotion.promo_type == '3': # fixed $ cost\n if promotion.promo_amount < amount:\n amount_discounted = amount - promotion.promo_amount\n else:\n # If you have a fixed cost promo of $20, but your items \n # only cost $10, you don't save.\n amount_discounted = 0\n LOG.debug('compute discount: amount_discounted = %s' % amount_discounted)\n return amount_discounted",
"def add_discount(self, bill):\n\n discounts_queryset = Discount.objects.prefetch_related('product')\n\n total_discount = 0\n\n for discount in discounts_queryset:\n discount_products = discount.product.all()\n if self.order.product in discount_products:\n bill['discounts'].append({'discount_title': discount.title,\n 'discount_size': discount.size})\n\n total_discount += discount.size\n if total_discount > 100:\n total_discount = 100\n\n bill['total'] = bill['total'] - bill['total'] / 100 * total_discount\n\n return bill",
"def discount(ir, period):\n\treturn ir.discount(period)",
"def duty_free(price: int, discount: int, holiday_cost: int) -> int:\n if holiday_cost == 500:\n return holiday_cost\n\n discount /= 100\n price = holiday_cost / (price * discount)\n price = int(price)\n return price",
"def add_discount(self, discount):\n self.discounts.append(discount)",
"def discount_amount(self, discount_amount):\n\n self._discount_amount = discount_amount",
"def discount_amount(self, discount_amount):\n\n self._discount_amount = discount_amount",
"def discount_amount(self, discount_amount):\n\n self._discount_amount = discount_amount",
"def mark_refunded(self):\n order = self.clone()\n order.status = Order.STATUS_REFUNDED\n order.save()\n return order",
"def redeem_coupon(coupon_version, order):\n coupon_redemption, _ = CouponRedemption.objects.update_or_create(\n order=order, defaults={\"coupon_version\": coupon_version}\n )\n return coupon_redemption",
"def fill_order(self, order: Order) -> None:\n order = self.get_order_by_id(order.id)\n order.status = OrderStatus.FILL",
"def apply_discounts(self):\n # for each valid discount...\n for discount in list(DiscountTypes):\n # only apply the discount if it is set in the cart\n if(discount in self.cart.discounts):\n getattr(self, discount.value)()",
"def modify_price(self, price):\n if price is not None and self.is_cancellable:\n log.info(\"bo#%s: modify price (pending) order \" % self.ticket)\n not_implemented_error(\"Can't modify price for now (only for pending orders which wasn't triggered\")\n order_id = self.order_id_master\n cancel_order(order_id) # DANGEROUS! it should be atomic operation!\n #style = self.style\n #if self.is_limit:\n #elif self.is_stop:\n #elif self.is_stop_limit\n #order_id = order(self.symbol, self.volume, style=new_style))\n \n else:\n return",
"def deposit(self, amount):\n self.dep = amount\n self.balance += self.dep",
"def implied_discount_factor(p1: Instrument, c1: Instrument, p2: Instrument, c2: Instrument) -> float:\n return (c1.price - p1.price - c2.price + p2.price)/ (c2.strike - c1.strike)",
"def update_orders(comp, order, user_correct, payment_id):\n users_orders = []\n for item in order.items.all():\n users_orders.append(item.id)\n item.is_paid = True\n item.save()\n order.related_competition = comp\n order.payment_id = payment_id\n order.order_date = timezone.now()\n order.answer_correct = user_correct\n order.ordered = True\n order.save()\n return order"
] | [
"0.6252258",
"0.618614",
"0.6119923",
"0.5988139",
"0.59648347",
"0.5937784",
"0.5921841",
"0.5921826",
"0.5836305",
"0.5754859",
"0.5699986",
"0.5638767",
"0.5624367",
"0.55587643",
"0.554168",
"0.5500832",
"0.5458696",
"0.5417213",
"0.5393511",
"0.53718966",
"0.53718966",
"0.53718966",
"0.5368851",
"0.53605384",
"0.5334627",
"0.53311634",
"0.53215003",
"0.5318726",
"0.5318526",
"0.53134304"
] | 0.64523685 | 0 |
Builds a pretrained VGG19 model that outputs image features extracted at the third block of the model | def build_vgg(self):
vgg = VGG19(weights="imagenet")
# Set outputs to outputs of last conv. layer in block 3
# See architecture at: https://github.com/keras-team/keras/blob/master/keras/applications/vgg19.py
vgg.outputs = [vgg.layers[9].output]
img = Input(shape=self.hr_shape)
# Extract image features
img_features = vgg(img)
return Model(img, img_features) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_vgg():\n input_shape = (256, 256, 3)\n\n vgg = keras.applications.VGG19(include_top = False , input_shape = input_shape , weights=\"imagenet\")\n features = vgg.get_layer(index = 9).output\n\n model = keras.Model(inputs=[vgg.inputs], outputs=[features])\n return model",
"def build_vgg(self):\n # Get the vgg network. Extract features from Block 5, last convolution.\n vgg = tf.keras.applications.VGG19(weights=\"imagenet\", input_shape=self.hr_shape, include_top=False)\n vgg.trainable = False\n for layer in vgg.layers:\n layer.trainable = False\n\n # Create model and compile\n model = tf.keras.models.Model(inputs=vgg.input, outputs=vgg.get_layer(\"block5_conv4\").output)\n\n return model",
"def build_vgg(self, weights=\"imagenet\"): \n \n # Input image to extract features from\n img = Input(shape=(self.img_rows, self.img_cols, 3))\n\n # Mean center and rescale by variance as in PyTorch\n processed = Lambda(lambda x: (x-self.mean) / self.std)(img)\n \n # If inference only, just return empty model \n if self.inference_only:\n model = Model(inputs=img, outputs=[img for _ in range(len(self.vgg_layers))])\n model.trainable = False\n model.compile(loss='mse', optimizer='adam')\n return model\n \n # Get the vgg network from Keras applications\n if weights in ['imagenet', None]:\n vgg = VGG16(weights=weights, include_top=False)\n else:\n vgg = VGG16(weights=None, include_top=False)\n vgg.load_weights(weights, by_name=True)\n\n # Output the first three pooling layers\n vgg.outputs = [vgg.layers[i].output for i in self.vgg_layers] \n \n # Create model and compile\n model = Model(inputs=img, outputs=vgg(processed))\n model.trainable = False\n model.compile(loss='mse', optimizer='adam')\n\n return model",
"def build_model(self):\n \n start_time = time.time()\n print(\"build model started\")\n # label\n self.FA = tf.placeholder(dtype=tf.int32, shape=[None])\n self.ges = tf.placeholder(dtype=tf.int32, shape=[None])\n self.obj = tf.placeholder(dtype=tf.int32, shape=[None])\n \n self.images = tf.placeholder(dtype=tf.float32, shape=[None, height, width, 3])\n batch_size = tf.shape(self.images)[0]\n rgb_scaled = self.images * 255.0\n\n # Convert RGB to BGR\n VGG_MEAN = [103.939, 116.779, 123.68]\n red, green, blue = tf.split(axis=3, num_or_size_splits=3, value=rgb_scaled)\n assert red.get_shape().as_list()[1:] == [224, 224, 1]\n assert green.get_shape().as_list()[1:] == [224, 224, 1]\n assert blue.get_shape().as_list()[1:] == [224, 224, 1]\n bgr = tf.concat(axis=3, values=[\n blue - VGG_MEAN[0],\n green - VGG_MEAN[1],\n red - VGG_MEAN[2],\n ])\n assert bgr.get_shape().as_list()[1:] == [224, 224, 3]\n \n with tf.variable_scope(\"vgg19\"):\n self.conv1_1 = self.conv_layer(bgr, \"conv1_1\")\n self.conv1_2 = self.conv_layer(self.conv1_1, \"conv1_2\")\n self.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\n self.conv2_1 = self.conv_layer(self.pool1, \"conv2_1\")\n self.conv2_2 = self.conv_layer(self.conv2_1, \"conv2_2\")\n self.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\n self.conv3_1 = self.conv_layer(self.pool2, \"conv3_1\")\n self.conv3_2 = self.conv_layer(self.conv3_1, \"conv3_2\")\n self.conv3_3 = self.conv_layer(self.conv3_2, \"conv3_3\")\n self.conv3_4 = self.conv_layer(self.conv3_3, \"conv3_4\")\n self.pool3 = self.max_pool(self.conv3_4, 'pool3')\n\n self.conv4_1 = self.conv_layer(self.pool3, \"conv4_1\")\n self.conv4_2 = self.conv_layer(self.conv4_1, \"conv4_2\")\n self.conv4_3 = self.conv_layer(self.conv4_2, \"conv4_3\")\n self.conv4_4 = self.conv_layer(self.conv4_3, \"conv4_4\")\n self.pool4 = self.max_pool(self.conv4_4, 'pool4')\n\n self.conv5_1 = self.conv_layer(self.pool4, \"conv5_1\")\n self.conv5_2 = self.conv_layer(self.conv5_1, \"conv5_2\")\n self.conv5_3 = self.conv_layer(self.conv5_2, \"conv5_3\")\n self.conv5_4 = self.conv_layer(self.conv5_3, \"conv5_4\")\n self.pool5 = self.max_pool(self.conv5_4, 'pool5')\n\n \n shape = self.pool5.get_shape()\n size = 1\n for dim in shape[1:]:\n size *= dim.value\n \n # dense\n with tf.variable_scope('dense') as scope:\n # Move everything into depth so we can perform a single matrix multiply.\n reshape = tf.reshape(self.pool5, [-1, size])\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[size, 192]))\n biases = tf.get_variable('biases', [192], initializer=tf.constant_initializer(0.1))\n dense = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)\n\n\n # linear layer(WX + b),\n with tf.variable_scope('softmax_linear_FA') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 2]))\n biases = tf.get_variable('biases', [2], initializer=tf.constant_initializer(0.1))\n softmax_linear_FA = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_FA = tf.nn.softmax(softmax_linear_FA)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.FA, logits=softmax_linear_FA, name='cross_entropy')\n cross_entropy_mean_FA = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n with tf.variable_scope('softmax_linear_ges') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 13]))\n biases = tf.get_variable('biases', [13], initializer=tf.constant_initializer(0.1))\n softmax_linear_ges = 
tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_ges = tf.nn.softmax(softmax_linear_ges)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.ges, logits=softmax_linear_ges, name='cross_entropy')\n cross_entropy_mean_ges = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n with tf.variable_scope('softmax_linear_obj') as scope:\n weights = tf.get_variable('weights', initializer=tf.truncated_normal(shape=[192, 24]))\n biases = tf.get_variable('biases', [24], initializer=tf.constant_initializer(0.1))\n softmax_linear_obj = tf.add(tf.matmul(dense, weights), biases, name=scope.name)\n self.output_obj = tf.nn.softmax(softmax_linear_obj)\n cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n labels=self.obj, logits=softmax_linear_obj, name='cross_entropy')\n cross_entropy_mean_obj = tf.reduce_mean(cross_entropy, name='cross_entropy_mean')\n\n self.loss = cross_entropy_mean_FA + cross_entropy_mean_ges + cross_entropy_mean_obj\n self.lr = tf.placeholder(tf.float32, [])\n self.global_step = tf.Variable(0, name=\"global_step\", trainable=False)\n optimizer = tf.train.AdamOptimizer(self.lr)\n grads_and_vars = optimizer.compute_gradients(self.loss)\n self.train_op = optimizer.apply_gradients(grads_and_vars, global_step=self.global_step)\n self.data_dict = None\n print((\"build model finished: %ds\" % (time.time() - start_time)))",
"def build_model():\n pretrained_model = VGG16(input_shape=(fixed_size[0], fixed_size[1], 3), weights='imagenet', include_top=False)\n # We will not train the layers imported.\n for layer in pretrained_model.layers:\n layer.trainable = False\n transfer_learning_model = Sequential()\n transfer_learning_model.add(pretrained_model)\n transfer_learning_model.add(Flatten())\n transfer_learning_model.add(Dense(128, activation='relu', kernel_initializer='he_uniform'))\n transfer_learning_model.add(Dropout(0.5))\n transfer_learning_model.add(Dense(3, activation='softmax'))\n transfer_learning_model.summary()\n opt = Adam(learning_rate=.0003)\n transfer_learning_model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy'])\n return transfer_learning_model",
"def build_vgg(hr_shape):\n \n vgg = VGG19(weights=\"imagenet\")\n # Set outputs to outputs of last conv. layer in block 3\n # See architecture at: https://github.com/keras-team/keras/blob/master/keras/applications/vgg19.py\n vgg.outputs = [vgg.layers[9].output]\n\n img = Input(hr_shape)\n\n # Extract image features\n img_features = vgg(img)\n\n return Model(img, img_features)",
"def model(pretrained=False, **kwargs):\r\n\r\n layers = make_layers(cfg['O'], dilation=dilation['D1'])\r\n cnv = np.cumsum(cnvs['OI']) if kwargs['args'].IN or kwargs['args'].INL else np.cumsum(cnvs['O'])\r\n model = VGG(layers, cnvs=cnv, **kwargs)\r\n if pretrained:\r\n pre2local_keymap = [('features.{}.weight'.format(i), 'conv1_2.{}.weight'.format(i)) for i in range(10)]\r\n pre2local_keymap += [('features.{}.bias'.format(i), 'conv1_2.{}.bias'.format(i)) for i in range(10)]\r\n pre2local_keymap += [('features.{}.weight'.format(i + 10), 'conv3.{}.weight'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.bias'.format(i + 10), 'conv3.{}.bias'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.weight'.format(i + 17), 'conv4.{}.weight'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.bias'.format(i + 17), 'conv4.{}.bias'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.weight'.format(i + 24), 'conv5.{}.weight'.format(i)) for i in range(7)]\r\n pre2local_keymap += [('features.{}.bias'.format(i + 24), 'conv5.{}.bias'.format(i)) for i in range(7)]\r\n pre2local_keymap = dict(pre2local_keymap)\r\n\r\n\r\n model_dict = model.state_dict()\r\n pretrained_file = os.path.join(kwargs['args'].pretrained_model_dir, kwargs['args'].pretrained_model)\r\n if os.path.isfile(pretrained_file):\r\n pretrained_dict = torch.load(pretrained_file)\r\n print('load pretrained model from {}'.format(pretrained_file))\r\n else:\r\n pretrained_dict = model_zoo.load_url(model_urls['vgg16'])\r\n print('load pretrained model from {}'.format(model_urls['vgg16']))\r\n # 0. replace the key\r\n pretrained_dict = {pre2local_keymap[k] if k in pre2local_keymap.keys() else k: v for k, v in\r\n pretrained_dict.items()}\r\n # *. show the loading information\r\n for k in pretrained_dict.keys():\r\n if k not in model_dict:\r\n print('Key {} is removed from vgg16'.format(k))\r\n print(' ')\r\n for k in model_dict.keys():\r\n if k not in pretrained_dict:\r\n print('Key {} is new added for DA Net'.format(k))\r\n # 1. filter out unnecessary keys\r\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\r\n # 2. overwrite entries in the existing state dict\r\n model_dict.update(pretrained_dict)\r\n # 3. load the new state dict\r\n model.load_state_dict(model_dict)\r\n return model",
"def get_vgg_model(self):\n # Load our model. We load pretrained VGG, trained on imagenet data\n self.vgg_model = tf.keras.applications.vgg19.VGG19(\n include_top=False, weights='imagenet')\n self.vgg_model.trainable = False\n # Get output layers corresponding to style and content layers\n self.style_outputs = [self.vgg_model.get_layer(\n name).output for name in self.style_layers]\n self.content_outputs = [self.vgg_model.get_layer(\n name).output for name in self.content_layers]\n self.model_outputs = self.style_outputs + self.content_outputs\n # Build model\n self.model = models.Model(self.vgg_model.input, self.model_outputs)",
"def vgg19(pretrained=False, **kwargs):\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['E']), **kwargs)\n\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg19'],\n model_dir='../'))\n return model",
"def vgg16(pretrained=False, **kwargs):\n model = VGG(make_layers(cfg['D']), **kwargs)\n if pretrained:\n model_dict = paddle.load('./pre_model/vgg16.paddle')\n model.set_state_dict(model_dict)\n return model",
"def model(pretrained=False, **kwargs):\n model = VGG(make_layers(cfg['D1'], dilation=dilation['D1']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg16']))\n return model",
"def VGGModel(input_shape):\n \n\n X_input = Input(input_shape)\n \n # Creating a Neural Network (VGG-16)\n\n X = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(X_input)\n X = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(X)\n\n # Block 2\n X = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(X)\n X = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(X)\n\n # Block 3\n X = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(X)\n X = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(X)\n X = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(X)\n\n # Block 4\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(X)\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(X)\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(X)\n\n # Block 5\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(X)\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(X)\n X = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(X)\n X = MaxPooling2D((2, 2), strides=(2, 2), name='block5_pool')(X)\n \n X = Flatten()(X)\n X = Dense(4096, activation='relu', kernel_initializer = 'he_normal', name='fc')(X)\n X = Dense(4096, activation='relu', kernel_initializer = 'he_normal', name='fc2')(X)\n X = Dense(2048, activation='relu', kernel_initializer = 'he_normal', name='fc3')(X)\n X = Dense(1024, activation='relu', kernel_initializer = 'he_normal', name='fc4')(X)\n X = Dense(512, activation='relu', kernel_initializer = 'he_normal', name='fc5')(X)\n X = Dense(256, activation='relu', kernel_initializer = 'he_normal', name='fc6')(X)\n X = Dense(2, activation='linear', name='regression')(X)\n model = Model(inputs=X_input, outputs = X, name='HappyModel')\n print(model.summary())\n \n return model",
"def __init__(self, img_rows=400, img_cols=400, vgg_weights=\"imagenet\", inference_only=False, net_name='default', gpus=1, vgg_device=None):\n \n # Settings\n self.img_rows = img_rows\n self.img_cols = img_cols\n self.img_overlap = 30\n self.inference_only = inference_only\n self.net_name = net_name\n self.gpus = gpus\n self.vgg_device = vgg_device\n\n # Scaling for VGG input\n self.mean = [0.485, 0.456, 0.406]\n self.std = [0.229, 0.224, 0.225]\n \n #get PowerSpect_CMB\n reader = np.zeros((2507,))\n fp = open('./data/COM_PowerSpect_CMB-base-plikHM-TTTEEE-lowl-lowE-lensing-minimum-theory_R3.01.txt')\n \n for i,line in enumerate(fp):\n if i >= 1:\n reader[i-1] = line.split()[1]\n \n fp.close() \n readers = np.log(reader)\n self.cl = K.constant(readers)\n # Assertions\n assert self.img_rows >= 256, 'Height must be >256 pixels'\n assert self.img_cols >= 256, 'Width must be >256 pixels'\n\n # Set current epoch\n self.current_epoch = 0\n \n # VGG layers to extract features from (first maxpooling layers, see pp. 7 of paper)\n self.vgg_layers = [3, 6, 10]\n\n # Instantiate the vgg network\n if self.vgg_device:\n with tf.device(self.vgg_device):\n self.vgg = self.build_vgg(vgg_weights)\n else:\n self.vgg = self.build_vgg(vgg_weights)\n \n # Create UNet-like model\n if self.gpus <= 1:\n self.model, inputs_mask= self.build_pconv_unet()\n self.compile_pconv_unet(self.model, inputs_mask) \n else:\n with tf.device(\"/cpu:0\"):\n self.model, inputs_mask = self.build_pconv_unet()\n self.model = multi_gpu_model(self.model, gpus=self.gpus)\n self.compile_pconv_unet(self.model, inputs_mask)",
"def vgg16_bn(pretrained,**kwargs):\n model = VGG(make_layers(cfg['D'], batch_norm=True), **kwargs)\n if pretrained:\n model_dict = paddle.load('./pre_model/vgg16_bn.paddle')\n model.set_state_dict(model_dict)\n return model",
"def create_model(input_tensor, mode, hyper_params):\n model = {}\n with tf.variable_scope('vgg16') as scope:\n net = tf.cast(input_tensor[\"image\"], dtype=tf.float32, name=\"input/cast\")\n model[\"image\"] = net\n mean = tf.constant([123.68, 116.779, 103.939], dtype=tf.float32, shape=[1, 1, 1, 3], name='img_mean')\n net = net - mean\n model[\"image-normalized\"] = net\n\n net = _create_conv2_block(model, net, filters=64, layer_number=1)\n net = _create_conv2_block(model, net, filters=128, layer_number=2)\n net = _create_conv3_block(model, net, filters=256, layer_number=3)\n net = _create_conv3_block(model, net, filters=512, layer_number=4)\n net = _create_conv3_block(model, net, filters=512, layer_number=5)\n print(net.get_shape())\n\n if not hyper_params.vgg16.encoder_only:\n net = tf.layers.conv2d(inputs=net, filters=4096, kernel_size=(7, 7), strides=(1, 1), name=\"fc1\", activation=tf.nn.relu)\n model[\"vgg16/fc1\"] = net\n net = tf.layers.conv2d(inputs=net, filters=4096, kernel_size=(1, 1), strides=(1, 1), name=\"fc2\", activation=tf.nn.relu)\n model[\"vgg16/fc2\"] = net\n net = tf.layers.conv2d(inputs=net, filters=1000, kernel_size=(1, 1), strides=(1, 1), name=\"logits\", activation=None)\n model[\"logits\"] = net\n net = tf.nn.softmax(net)\n model[\"probs\"] = net\n return model",
"def VGG19(include_top=True, weights='imagenet',\n input_tensor=None, input_shape=None,\n pooling=None,\n classes=1000):\n if weights not in {'imagenet', None}:\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization) or `imagenet` '\n '(pre-training on ImageNet).')\n\n if weights == 'imagenet' and include_top and classes != 1000:\n raise ValueError('If using `weights` as imagenet with `include_top`'\n ' as true, `classes` should be 1000')\n # Determine proper input shape\n\n\n img_input = input_tensor\n # Block 1\n x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(img_input)\n x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x)\n\n # Block 2\n x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x)\n x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x)\n\n # Block 3\n x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x)\n x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x)\n x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv3')(x)\n x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv4')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x)\n\n # Block 4\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv2')(x)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv3')(x)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv4')(x)\n x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x)\n\n # Block 5\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1')(x)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2')(x)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3')(x)\n x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv4')(x)\n\n model = Model(img_input, x, name='vgg19')\n\n if weights == 'imagenet':\n if include_top:\n weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels.h5',\n WEIGHTS_PATH,\n cache_subdir='models',\n file_hash='cbe5617147190e668d6c5d5026f83318')\n else:\n weights_path = get_file('vgg19_weights_tf_dim_ordering_tf_kernels_notop.h5',\n WEIGHTS_PATH_NO_TOP,\n cache_subdir='models',\n file_hash='253f8cb515780f3b799900260a226db6')\n model.load_weights(weights_path)\n\n return model",
"def vgg16(pretrained=False, **kwargs):\n if pretrained:\n kwargs['init_weights'] = False\n model = VGG(make_layers(cfg['D']), **kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg16']), strict=False)\n return model",
"def vgg19(pretrained=False,SEED=0,Ratio=0,**kwargs):\n if pretrained:\n kwargs['init_weights'] = False\n model = MyVGG(make_layers(cfg['E'],SEED=SEED,Ratio=Ratio),**kwargs)\n if pretrained:\n model.load_state_dict(model_zoo.load_url(model_urls['vgg19']))\n return model",
"def load_pretrained_layers(self):\n # Current state of base\n state_dict = self.state_dict()\n param_names = list(state_dict.keys())\n\n # Pretrained VGG base\n pretrained_state_dict = torchvision.models.vgg16(pretrained=True).state_dict()\n pretrained_param_names = list(pretrained_state_dict.keys())\n\n # Transfer conv. parameters from pretrained model to current model\n for i, param in enumerate(param_names[:-4]): # excluding conv6 and conv7 parameters\n state_dict[param] = pretrained_state_dict[pretrained_param_names[i]]\n\n # Convert fc6, fc7 to convolutional layers, and subsample (by decimation) to sizes of conv6 and conv7\n # fc6\n conv_fc6_weight = pretrained_state_dict['classifier.0.weight'].view(4096, 512, 7, 7) # (4096, 512, 7, 7)\n conv_fc6_bias = pretrained_state_dict['classifier.0.bias'] # (4096)\n state_dict['conv6.weight'] = decimate(conv_fc6_weight, m=[4, None, 3, 3]) # (1024, 512, 3, 3)\n state_dict['conv6.bias'] = decimate(conv_fc6_bias, m=[4]) # (1024)\n # fc7\n conv_fc7_weight = pretrained_state_dict['classifier.3.weight'].view(4096, 4096, 1, 1) # (4096, 4096, 1, 1)\n conv_fc7_bias = pretrained_state_dict['classifier.3.bias'] # (4096)\n state_dict['conv7.weight'] = decimate(conv_fc7_weight, m=[4, 4, None, None]) # (1024, 1024, 1, 1)\n state_dict['conv7.bias'] = decimate(conv_fc7_bias, m=[4]) # (1024)\n\n # Note: an FC layer of size (K) operating on a flattened version (C*H*W) of a 2D image of size (C, H, W)...\n # ...is equivalent to a convolutional layer with kernel size (H, W), input channels C, output channels K...\n # ...operating on the 2D image of size (C, H, W) without padding\n\n self.load_state_dict(state_dict)\n\n print(\"\\nLoaded base model.\\n\")",
"def build_finetuned_model(args, input_shape, fc_size):\n # setup model\n vgg, base_vgg = VGGWithCustomLayers(args.nb_classes, input_shape, fc_size)\n # setup layers to be trained or not\n setup_trainable_layers(vgg, args.layers_to_freeze)\n # compiling the model\n vgg.compile(optimizer=SGD(lr=0.0001, momentum=0.9), loss='categorical_crossentropy', metrics=['accuracy'])\n\n return vg",
"def vgg_16(input_shape=(224, 224, 3), output_shape=1000):\n model = Sequential()\n \n # layer 1 ~ 2 (filter: 64)\n model.add(Input(shape=input_shape))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(64, (3, 3), activation='relu'))\n model.add(MaxPool2D((2, 2), strides=(2, 2)))\n # output size: 112 x 112 x 64\n \n # layer 3 ~ 4 (filter: 128)\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(128, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(128, (3, 3), activation='relu'))\n model.add(MaxPool2D((2, 2), strides=(2, 2)))\n # output size: 56 x 56 x 128\n \n # layer 5 ~ 7 (filter: 256)\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(256, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(256, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(256, (3, 3), activation='relu'))\n model.add(MaxPool2D((2, 2), strides=(2, 2)))\n # output size: 28 x 28 x 256\n \n # layer 8 ~ 10 (filter: 512)\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(512, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(512, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(512, (3, 3), activation='relu'))\n model.add(MaxPool2D((2, 2), strides=(2, 2)))\n # output size: 14 x 14 x 512\n \n # layer 11 ~ 13 (filter: 512)\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(512, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(512, (3, 3), activation='relu'))\n model.add(ZeroPadding2D((1, 1)))\n model.add(Conv2D(512, (3, 3), activation='relu'))\n model.add(MaxPool2D((2, 2), strides=(2, 2)))\n # output size: 7 x 7 x 512\n \n # layer 14 ~ 16 (Fully Connected)\n model.add(Flatten())\n # flatten: 7 x 7 x 512 = 25,088\n model.add(Dense(4096, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(4096, activation='relu'))\n model.add(Dropout(0.5))\n model.add(Dense(output_shape, activation='softmax'))\n # categorized by output shape\n \n return model",
"def create():\n with torch.set_grad_enabled(False):\n model = torch.hub.load(\n \"pytorch/vision:v0.6.0\", \"vgg11\", pretrained=True).eval()\n\n with_cuda = torch.cuda.is_available()\n if with_cuda:\n model.to(\"cuda\")\n else:\n logging.warn(\"Running on CPU, no CUDA detected.\")\n\n def call(features):\n images = features[\"image\"].numpy()\n # Normalize according to the documentation. Note that the pro-processing\n # will already have the range normalized to [0, 1].\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n images_normalized = (images - mean) / std\n # Reshape from [batch, h, w, c] -> [batch, c, h, w]\n images_normalized_bchw = np.transpose(\n images_normalized, [0, 3, 1, 2]).astype(np.float32).copy()\n with torch.no_grad():\n images_torch = torch.from_numpy(images_normalized_bchw)\n if with_cuda:\n images_torch = images_torch.to(\"cuda\")\n logits = model(images_torch)\n return torch.nn.functional.softmax(logits, dim=-1).cpu().numpy()\n\n preprocess_config = \"resize_small(256)|central_crop(224)|value_range(0,1)\"\n preprocess_fn = pipeline_builder.get_preprocess_fn(\n preprocess_config, remove_tpu_dtypes=False)\n return call, preprocess_fn",
"def build(self):\n\n # bgr_ = bgr*255.0\n bgr_= self.X\n start_time = time.time()\n print(\"build model started\")\n\n # blue ,green, red = tf.split(axis=3, num_or_size_splits=3, value= bgr)\n red ,green, blue, = tf.split(axis=3, num_or_size_splits=3, value= bgr_)\n assert red.get_shape().as_list()[1:] == [224, 224, 1]\n assert green.get_shape().as_list()[1:] == [224, 224, 1]\n assert blue.get_shape().as_list()[1:] == [224, 224, 1]\n bgr = tf.concat(axis=3, values=[\n # blue - VGG_MEAN[0],\n # green - VGG_MEAN[1],\n # red - VGG_MEAN[2],\n\n red - VGG_MEAN[0],\n green - VGG_MEAN[1],\n blue - VGG_MEAN[2],\n ])\n assert bgr.get_shape().as_list()[1:] == [224, 224, 3]\n\n\n\n print(bgr.shape)\n\n self.conv1_1 = self.conv_layer(bgr, \"conv1_1\")\n self.conv1_2 = self.conv_layer(self.conv1_1, \"conv1_2\")\n self.pool1 = self.max_pool(self.conv1_2, 'pool1')\n\n self.conv2_1 = self.conv_layer(self.pool1, \"conv2_1\")\n self.conv2_2 = self.conv_layer(self.conv2_1, \"conv2_2\")\n self.pool2 = self.max_pool(self.conv2_2, 'pool2')\n\n\n\n\n self.conv3_1 = self.conv_layer(self.pool2, \"conv3_1\")\n self.conv3_2 = self.conv_layer(self.conv3_1, \"conv3_2\")\n self.conv3_3 = self.conv_layer(self.conv3_2, \"conv3_3\")\n self.pool3 = self.max_pool(self.conv3_3, 'pool3')\n\n self.conv4_1 = self.conv_layer(self.pool3, \"conv4_1\")\n self.conv4_2 = self.conv_layer(self.conv4_1, \"conv4_2\")\n self.conv4_3 = self.conv_layer(self.conv4_2, \"conv4_3\")\n self.pool4 = self.max_pool(self.conv4_3, 'pool4')\n\n\n\n\n\n self.conv5_1 = self.conv_layer(self.pool4, \"conv5_1\")\n self.conv5_2 = self.conv_layer(self.conv5_1, \"conv5_2\")\n self.conv5_3 = self.conv_layer(self.conv5_2, \"conv5_3\")\n self.pool5 = self.max_pool(self.conv5_3, 'pool5')\n\n self.fc6 = self.fc_layer(self.pool5, \"fc6\")\n assert self.fc6.get_shape().as_list()[1:] == [4096]\n self.relu6 = tf.nn.relu(self.fc6)\n\n self.fc7 = self.fc_layer(self.relu6, \"fc7\")\n self.relu7 = tf.nn.relu(self.fc7)\n\n self.fc8 = self.fc_layer(self.relu7, \"fc8\")\n\n # self.fc9 = self.fc_layer(self.fc8,'fc9')\n # self.relu9 = tf.nn.relu(self.fc9)\n\n\n\n\n relu8 = tf.nn.relu(self.fc8)\n fc9 = self.fc_layer(relu8, 'fc9')\n print((\"build model finished: %ds\" % (time.time() - start_time)))\n return fc9\n\n # self.prob = tf.nn.softmax(self.fc8, name=\"prob\")",
"def image_features_extracter(cnn_encoder):\n\n if cnn_encoder == 'InceptionV3':\n\n image_model = tf.keras.applications.InceptionV3(include_top=False,\n weights='imagenet')\n new_input = image_model.input\n hidden_layer = image_model.layers[-1].output\n\n # TODO: not really sure why this redefinition is necessary (instead of\n # just using image_model directly)\n image_features_extract_model = tf.keras.Model(new_input, hidden_layer)\n\n elif cnn_encoder == 'VGG16':\n\n image_model = tf.keras.applications.VGG16(include_top=False,\n weights='imagenet')\n new_input = image_model.input\n hidden_layer = image_model.get_layer('block5_conv3').output\n\n # TODO: not really sure why this redefinition is necessary (instead of\n # just using image_model directly)\n image_features_extract_model = tf.keras.Model(new_input, hidden_layer)\n\n return image_features_extract_model",
"def create_vgg(input_shape):\n\n # Load a pre-trained VGG19 model trained on 'Imagenet' dataset\n vgg = VGG19(weights=\"imagenet\")\n vgg.outputs = [vgg.layers[9].output]\n\n input_layer = Input(shape=input_shape)\n\n # Extract features\n features = vgg(input_layer)\n\n # Create a Keras model\n model = Model(inputs=[input_layer], outputs=[features])\n return model",
"def get_model(num_class):\n # import VGG model and use it\n model = VGG19(weights = \"imagenet\", include_top=False, input_shape = (img_width, img_height, 3), pooling='max')\n\n # Freeze the layers which you don't want to train. Here I am freezing the first 5 layers.\n for layer in model.layers:\n layer.trainable = False\n\n #Adding custom Layers\n x = model.output\n #x = Flatten()(x)\n #x = Dropout(0.5)(x)\n x = Dense(120, activation=\"relu\")(x)\n x = Dropout(0.5)(x)\n x = Dense(120, activation=\"relu\")(x)\n predictions = Dense(num_class, activation='softmax')(x)\n\n # This is the model we will train\n model = Model(inputs=model.input, outputs=predictions)\n\n model.compile(loss='categorical_crossentropy',\n optimizer='adam',\n metrics=['accuracy'])\n\n callbacks_list = [keras.callbacks.EarlyStopping(monitor='val_acc', patience=10, verbose=1)]\n print(model.summary())\n\n return model",
"def build_cnn_vgg16(num_classes):\n\n inputs = tf.keras.layers.Input(\n shape=(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS)\n )\n\n x = inputs\n x = tf.keras.applications.vgg16.preprocess_input(x)\n vgg16 = tf.keras.applications.VGG16(\n input_shape=(IMAGE_WIDTH, IMAGE_HEIGHT, IMAGE_CHANNELS),\n weights=\"imagenet\",\n include_top=False\n )\n\n vgg16.trainable = False\n x = vgg16(x, training=False)\n\n x = tf.keras.layers.GlobalAveragePooling2D()(x)\n x = tf.keras.layers.Dense(\n units=num_classes,\n activation=tf.keras.activations.softmax\n )(x)\n\n outputs = x\n\n model = tf.keras.Model(\n inputs=inputs,\n outputs=outputs\n )\n\n return model",
"def build_model(img_width,\n\timg_height,\n\tchannels,\n\tnum_classes,\n\tlr=1e-5,\n\tfreeze=False):\n\n\tvgg_model = VGG16(include_top=False, weights='imagenet', input_shape=(img_width, img_height, channels))\n\n\tvgg_output = vgg_model.output\n\tdrop0 = Dropout(0.5)(vgg_output)\n\tflat = Flatten()(drop0)\n\tdense1 = Dense(512, activation='relu')(flat)\n\tdrop1 = Dropout(0.5)(dense1)\n\tpredictions = Dense(num_classes, activation='softmax')(drop1)\n\n\tmodel = Model(inputs=vgg_model.input, outputs=predictions)\n\t\n\tif freeze:\n\t\tfor layer in vgg_model.layers:\n\t\t\tlayer.trainable = False\n\n\tmodel.summary()\n\tadam = Adam(lr=lr, decay=1e-6)\n\tmodel.compile(optimizer=adam, loss='categorical_crossentropy', metrics=['accuracy'])\n\treturn model",
"def load_pretrained_layers(self):\n # Current state of base\n state_dict = self.state_dict()\n param_names = list(state_dict.keys())\n\n # VGG base with pretrained weights\n pretrained_state_dict = torchvision.models.vgg16(pretrained=True).state_dict()\n pretrained_param_names = list(pretrained_state_dict.keys())\n\n # Copy pretrained weights to our current VGG model base\n for i, param in enumerate(param_names[:-4]): # excluding conv6 and conv7 parameters\n state_dict[param] = pretrained_state_dict[pretrained_param_names[i]]\n\n # Convert fc6, fc7 to convolutional layers, and subsample (by decimation) to sizes of conv6 and conv7\n # fc6\n conv_fc6_weight = pretrained_state_dict['classifier.0.weight'].view(4096, 512, 7, 7) # (4096, 512, 7, 7)\n conv_fc6_bias = pretrained_state_dict['classifier.0.bias'] # (4096)\n state_dict['conv6.weight'] = decimate(conv_fc6_weight, m=[4, None, 3, 3]) # (1024, 512, 3, 3)\n state_dict['conv6.bias'] = decimate(conv_fc6_bias, m=[4]) # (1024)\n # fc7\n conv_fc7_weight = pretrained_state_dict['classifier.3.weight'].view(4096, 4096, 1, 1) # (4096, 4096, 1, 1)\n conv_fc7_bias = pretrained_state_dict['classifier.3.bias'] # (4096)\n state_dict['conv7.weight'] = decimate(conv_fc7_weight, m=[4, 4, None, None]) # (1024, 1024, 1, 1)\n state_dict['conv7.bias'] = decimate(conv_fc7_bias, m=[4]) # (1024)\n\n self.load_state_dict(state_dict)\n\n print(\"\\nLoaded base model.\\n\")",
"def create_features(self, cfg_path):\n def parse_cfg(cfg_path):\n blocks = []\n fp = open(cfg_path, 'r')\n block = None\n line = fp.readline()\n while line != '':\n line = line.rstrip()\n if line == '' or line[0] == '#':\n line = fp.readline()\n continue\n elif line[0] == '[':\n if block:\n blocks.append(block)\n block = dict()\n block['type'] = line.lstrip('[').rstrip(']')\n # set default value\n if block['type'] == 'convolutional':\n block['batch_normalize'] = 0\n else:\n key, value = line.split('=')\n key = key.strip()\n if key == 'type':\n key = '_type'\n value = value.strip()\n block[key] = value\n line = fp.readline()\n\n if block:\n blocks.append(block)\n fp.close()\n return blocks\n\n blocks = parse_cfg(cfg_path)\n\n models = nn.Sequential()\n conv_id = 0\n prev_filters = 0\n max_pool_id = 0\n \n for block in blocks:\n if block['type'] == 'net':\n prev_filters = int(block['channels'])\n continue\n elif block['type'] == 'convolutional':\n conv_id += 1\n # is_bn = int(block['batch_normalize']) # extraction.conv.weight has no batch_normalize, but it needed.\n filters = int(block['filters'])\n kernel_size = int(block['size'])\n stride = int(block['stride'])\n is_pad = int(block['pad'])\n pad_size = (kernel_size - 1) // 2 if is_pad else 0\n activation = block['activation']\n models.add_module(f\"conv{conv_id}\", nn.Conv2d(prev_filters, filters, kernel_size, stride, pad_size, bias=False))\n models.add_module(f\"bn{conv_id}\", nn.BatchNorm2d(filters))\n if activation =='leaky':\n models.add_module(f\"leaky{conv_id}\", nn.LeakyReLU(0.1, inplace=True))\n prev_filters = filters\n\n elif block['type'] == 'maxpool':\n max_pool_id += 1\n pool_size = int(block['size'])\n stride = int(block['stride'])\n models.add_module(f\"maxpool{max_pool_id}\", nn.MaxPool2d(pool_size, stride))\n \n # elif block['type'] == 'avgpool':\n # models.add_module(\"avgpool\", nn.AvgPool2d(7))\n\n # elif block['type'] == 'connected':\n # filters = int(block['output'])\n # models.add_module(\"fc\", nn.Linear(prev_filters, filters))\n \n # elif block['type'] == 'softmax':\n # models.add_module(\"softmax\", nn.Softmax())\n\n # print(models)\n return models"
] | [
"0.79228604",
"0.7664166",
"0.7466115",
"0.72785217",
"0.7189622",
"0.71872103",
"0.7153017",
"0.7121094",
"0.699137",
"0.6904944",
"0.6891368",
"0.68058294",
"0.6782009",
"0.6762118",
"0.67149067",
"0.67085296",
"0.6690299",
"0.66350603",
"0.66326374",
"0.6616833",
"0.66008085",
"0.6596637",
"0.6578139",
"0.6545943",
"0.654574",
"0.65321064",
"0.6513062",
"0.6504885",
"0.6486976",
"0.64820457"
] | 0.788697 | 1 |
Returns given vm's/template's disks collection href or list of disk objects | def getObjDisks(name, get_href=True, is_template=False):
response = get_disk_attachments(
name, 'template' if is_template else 'vm', get_href
)
if get_href:
return response
return get_disk_list_from_disk_attachments(response) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_vdisks(client, resource_group_name, vm_name):\n virtual_machine = client.get(resource_group_name, vm_name)\n return virtual_machine.disks",
"def get_disk_attachments(name, object_type='vm', get_href=False):\n api = get_api(object_type, \"%ss\" % object_type)\n obj = api.find(name)\n return DISK_ATTACHMENTS_API.getElemFromLink(obj, get_href=get_href)",
"def get_all_disks():\n return DISKS_API.get(abs_link=False)",
"def show_vdisk(client, resource_group_name, vm_name, disk_name):\n virtual_machine = client.get(resource_group_name, vm_name)\n for disk in virtual_machine.disks:\n if disk.virtual_disk_name == disk_name:\n return disk\n return None",
"def get_disks():\n disks = []\n\n try:\n # Run script\n result = run_diskpart(['list disk'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append disk numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Disk (\\d+)\\s+\\w+\\s+(\\d+\\s+\\w+)', output):\n num = tmp[0]\n size = human_readable_size(tmp[1])\n disks.append({'Number': num, 'Size': size})\n\n return disks",
"def getGuestDisk(self, oSession, oTxsSession, eStorageController):\n lstDisks = None;\n\n # The naming scheme for NVMe is different and we don't have\n # to query the guest for unformatted disks here because the disk with the OS\n # is not attached to a NVMe controller.\n if eStorageController == vboxcon.StorageControllerType_NVMe:\n lstDisks = [ '/dev/nvme0n1' ];\n else:\n # Find a unformatted disk (no partition).\n # @todo: This is a hack because LIST and STAT are not yet implemented\n # in TXS (get to this eventually)\n lstBlkDev = [ '/dev/sda', '/dev/sdb' ];\n for sBlkDev in lstBlkDev:\n fRc = oTxsSession.syncExec('/usr/bin/ls', ('ls', sBlkDev + '1'));\n if not fRc:\n lstDisks = [ sBlkDev ];\n break;\n\n _ = oSession;\n return lstDisks;",
"def list_disks(self, instance_name):\n return ['A_DISK']",
"def ListDisks(self) -> Dict[str, 'AZComputeDisk']:\n disks = self.az_account.compute.ListDisks(\n resource_group_name=self.resource_group_name)\n vm_disks = self.compute_client.virtual_machines.get(\n self.resource_group_name, self.name).storage_profile\n vm_disks_names = [disk.name for disk in vm_disks.data_disks]\n vm_disks_names.append(vm_disks.os_disk.name)\n return {disk_name: disks[disk_name] for disk_name in vm_disks_names}",
"def ListVdisks(self, headers=None, query_params=None, content_type=\"application/json\"):\n uri = self.client.base_url + \"/vdisks\"\n return self.client.get(uri, None, headers, query_params, content_type)",
"def ParseDiskResource(resources, name, project, zone, type_):\n if type_ == compute_scopes.ScopeEnum.REGION:\n return resources.Parse(\n name,\n collection='compute.regionDisks',\n params={\n 'project': project,\n 'region': utils.ZoneNameToRegionName(zone)\n })\n else:\n return resources.Parse(\n name,\n collection='compute.disks',\n params={\n 'project': project,\n 'zone': zone\n })",
"def get_ceph_disk():\n disks = []\n for srv in get_srv_list():\n cfg = get_srv_config(srv)\n for key in ['osd_data', 'osd_journal', 'mds_data', 'mon_data']:\n mnt_point = cfg[key]\n disk = get_disk_by_mountpoint(find_mount_point(mnt_point))\n if disk not in disks:\n disks.append(disk)\n return disks",
"def fs_get_disk_list(self):\n\t\treturn Job(SDK.PrlSrv_FsGetDiskList(self.handle)[0])",
"def get_disk_list_from_disk_attachments(disk_attachments):\n return [\n get_disk_obj_from_disk_attachment(disk_attachment) for\n disk_attachment in disk_attachments\n ]",
"def get_disk_attachment(name, disk, attr='id', object_type='vm'):\n disk_list = get_disk_attachments(name, object_type=object_type)\n disk_id = None\n if attr == 'name' or attr == 'alias':\n for disk_obj in disk_list:\n disk_obj_alias = get_disk_obj(\n disk_obj.get_id(), attribute='id'\n ).get_alias()\n if disk_obj_alias == disk:\n disk_id = disk_obj.get_id()\n break\n elif attr == 'id':\n disk_id = disk\n\n for disk in disk_list:\n if disk.get_id() == disk_id:\n return disk\n return None",
"def _get_data_volumes(vm_):\n ret = []\n volumes = vm_[\"volumes\"]\n for key, value in volumes.items():\n # Verify the required 'disk_size' property is present in the cloud\n # profile config\n if \"disk_size\" not in volumes[key].keys():\n raise SaltCloudConfigError(\n \"The volume '{}' is missing 'disk_size'\".format(key)\n )\n # Use 'HDD' if no 'disk_type' property is present in cloud profile\n if \"disk_type\" not in volumes[key].keys():\n volumes[key][\"disk_type\"] = \"HDD\"\n\n # Construct volume object and assign to a list.\n volume = Volume(\n name=key,\n size=volumes[key][\"disk_size\"],\n disk_type=volumes[key][\"disk_type\"],\n licence_type=\"OTHER\",\n )\n\n # Set volume availability zone if defined in the cloud profile\n if \"disk_availability_zone\" in volumes[key].keys():\n volume.availability_zone = volumes[key][\"disk_availability_zone\"]\n\n ret.append(volume)\n\n return ret",
"def drives(self):\n if \"drives\" in self._prop_dict:\n return DrivesCollectionPage(self._prop_dict[\"drives\"])\n else:\n return None",
"def getTemplateDisk(template_name, alias):\n template_disks = getObjDisks(\n template_name, get_href=False, is_template=True\n )\n for template_disk in template_disks:\n if alias == template_disk.get_alias():\n return template_disk\n raise EntityNotFound(\n \"Didn't find disk %s for template %s\" % (alias, template_name)\n )",
"def getVmDisk(vmName, alias=None, disk_id=None):\n value = None\n if disk_id:\n prop = \"id\"\n value = disk_id\n elif alias:\n prop = \"name\"\n value = alias\n else:\n logger.error(\"No disk identifier or name was provided\")\n return None\n return get_disk_obj_from_disk_attachment(\n get_disk_attachment(vmName, value, prop)\n )",
"def get_disks():\n\n if system() != \"Windows\":\n raise OSError(\"For use with Windows platforms.\")\n\n logicaldisks=run(\n [\"wmic\", \"logicaldisk\", \"get\", \"name\"],\n capture_output=True\n )\n\n return findall(\"[A-Z]:\", str(logicaldisks.stdout))",
"def get_persistent_disks(k8s_ctx: str, dry_run: bool = False) -> List[str]:\n cmd = f'kubectl --context={k8s_ctx} get pv -o json'\n if dry_run:\n logging.info(cmd)\n else:\n p = safe_exec(cmd)\n if p.stdout:\n pds = json.loads(p.stdout.decode())\n return [i['spec']['csi']['volumeHandle'].split('/')[-1] for i in pds['items']]\n return list()",
"def mpt():\n lbl_drives = ['device','mountpoint','fstype']\n disks = [d[0:3] for d in psutil.disk_partitions()]\n drives = [dict(zip(lbl_drives,ds)) for ds in disks]\n return [d['mountpoint']for d in drives]",
"async def get_disks(self, oid):\n pool = await self.query([('id', '=', oid)], {'get': True})\n if not pool['is_decrypted']:\n yield\n async for i in await self.middleware.call('zfs.pool.get_disks', pool['name']):\n yield i",
"def get_storage_domain_diskssnapshots_objects(storagedomain, get_href=False):\n from art.rhevm_api.tests_lib.low_level.storagedomains import (\n get_storage_domain_obj\n )\n storage_domain_object = get_storage_domain_obj(storagedomain)\n return DISK_SNAPSHOT_API.getElemFromLink(\n storage_domain_object,\n link_name='disksnapshots',\n attr='disk_snapshot',\n get_href=get_href,\n )",
"def getDisk(self, item):\n return self.disks[item]",
"def get_volumes(instance):\n if instance.cloud == 'aws':\n client = boto3.session.Session().client('ec2', instance.region)\n devices = client.describe_instance_attribute(\n InstanceId=instance.id, Attribute='blockDeviceMapping').get('BlockDeviceMappings', [])\n volumes = client.describe_volumes(VolumeIds=[device['Ebs']['VolumeId']\n for device in devices if device.get('Ebs', {}).get('VolumeId')]).get('Volumes', [])\n return {volume['Attachments'][0]['Device']: {'size': volume['Size'], 'volume_type': volume['VolumeType']} for volume in volumes}\n if instance.cloud == 'gcp':\n credentials = GoogleCredentials.get_application_default()\n compute = discovery.build('compute', 'v1', credentials=credentials)\n volumes = {}\n for disk in compute.instances().get(instance=instance.id,\n zone=instance.zone,\n project=instance.project).execute()['disks']:\n index = disk['index']\n name = disk['deviceName'] if disk['deviceName'] not in [u'persistent-disk-0', 'boot'] else instance.id\n if 'local-ssd' in disk['deviceName']:\n size = 375.0\n disk_type = 'local-ssd'\n else:\n size = float(disk.get('diskSizeGb', 0.))\n disk_type = 'pd-ssd'\n volumes[index] = {'size': size,\n 'type': disk['type'],\n 'deviceName': disk['deviceName'],\n 'interface': disk['interface'],\n 'diskType': disk_type}\n return volumes\n raise ValueError('Unknown cloud %s' % instance.cloud)",
"def list_vm_template(client, private_cloud, resource_pool, location):\n return client.list(private_cloud, location, resource_pool)",
"def get_overage_disks_json(disk_list):\n\t\tpass",
"def get_snapshot_disks_by_snapshot_obj(snapshot):\n return DISKS_API.getElemFromLink(snapshot)",
"def fusion_api_get_storage_volumes_template(self, uri=None, param='', api=None, headers=None):\n return self.template.get(uri=uri, api=api, headers=headers, param=param)",
"def get_disk_type(vm_):\n return config.get_cloud_config_value(\n \"disk_type\", vm_, __opts__, default=\"HDD\", search_global=False\n )"
] | [
"0.6487518",
"0.6178933",
"0.60788083",
"0.59857213",
"0.5928848",
"0.5895868",
"0.58672506",
"0.5854115",
"0.58504194",
"0.57318586",
"0.5724269",
"0.5638056",
"0.5616229",
"0.5597307",
"0.55598325",
"0.55260617",
"0.55246115",
"0.54903316",
"0.5446013",
"0.5396282",
"0.53787184",
"0.5365633",
"0.532019",
"0.53178257",
"0.52834785",
"0.5282716",
"0.52691036",
"0.5234597",
"0.52334845",
"0.52328247"
] | 0.7348018 | 0 |
Returns a Disk object from a disk attached to a vm | def getVmDisk(vmName, alias=None, disk_id=None):
value = None
if disk_id:
prop = "id"
value = disk_id
elif alias:
prop = "name"
value = alias
else:
logger.error("No disk identifier or name was provided")
return None
return get_disk_obj_from_disk_attachment(
get_disk_attachment(vmName, value, prop)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show_vdisk(client, resource_group_name, vm_name, disk_name):\n virtual_machine = client.get(resource_group_name, vm_name)\n for disk in virtual_machine.disks:\n if disk.virtual_disk_name == disk_name:\n return disk\n return None",
"def get_disk_obj_from_disk_attachment(disk_attachment):\n return get_disk_obj(disk_attachment.get_id(), 'id')",
"def disk(self, disk_id):\n try:\n return self._disks[disk_id]\n except KeyError:\n util.log_error(\n \"couldn't find disk {} on vm {}\".format(disk_id, self.id)\n )\n raise",
"def get_disk_attachment(name, disk, attr='id', object_type='vm'):\n disk_list = get_disk_attachments(name, object_type=object_type)\n disk_id = None\n if attr == 'name' or attr == 'alias':\n for disk_obj in disk_list:\n disk_obj_alias = get_disk_obj(\n disk_obj.get_id(), attribute='id'\n ).get_alias()\n if disk_obj_alias == disk:\n disk_id = disk_obj.get_id()\n break\n elif attr == 'id':\n disk_id = disk\n\n for disk in disk_list:\n if disk.get_id() == disk_id:\n return disk\n return None",
"def get_disk_obj(disk_alias, attribute='name'):\n return DISKS_API.find(disk_alias, attribute=attribute)",
"def detachDisk(positive, alias, vmName):\n logger.info(\"Detaching disk %s from vm %s\", alias, vmName)\n disk_attachment = get_disk_attachment(vmName, alias, attr='name')\n return DISK_ATTACHMENTS_API.delete(disk_attachment, positive)",
"def create_disk(self, disk):\n spec = {\n 'new_vmdk': {\n # Convert from mebibytes to bytes because VMDK is specified in bytes\n 'capacity': 1024\n * 1024\n * disk.size,\n }\n }\n\n try:\n backend_id = self.client.create_disk(disk.vm.backend_id, spec)\n except VMwareError as e:\n raise VMwareBackendError(e)\n else:\n disk.backend_id = backend_id\n disk.save(update_fields=['backend_id'])\n signals.vm_updated.send(self.__class__, vm=disk.vm)\n return disk",
"def disk_detach(vmdk_path, vm):\n\n device = findDeviceByPath(vmdk_path, vm)\n\n if not device:\n # Could happen if the disk attached to a different VM - attach fails\n # and docker will insist to sending \"unmount/detach\" which also fails.\n msg = \"*** Detach failed: disk={0} not found. VM={1}\".format(\n vmdk_path, vm.config.uuid)\n logging.warning(msg)\n return err(msg)\n\n spec = vim.vm.ConfigSpec()\n dev_changes = []\n\n disk_spec = vim.vm.device.VirtualDeviceSpec()\n disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove\n disk_spec.device = device\n dev_changes.append(disk_spec)\n spec.deviceChange = dev_changes\n\n try:\n wait_for_tasks(si, [vm.ReconfigVM_Task(spec=spec)])\n except vim.fault.GenericVmConfigFault as ex:\n for f in ex.faultMessage:\n logging.warning(f.message)\n return err(\"Failed to detach \" + vmdk_path)\n\n setStatusDetached(vmdk_path)\n logging.info(\"Disk detached %s\", vmdk_path)\n return None",
"def createVM(self ,disk ,name):\n return",
"def parseDisk(disk):\n\n\tdict={}\n\n\td = disk.split(',')\n\tif len(d) != 3:\n rocks.commands.Abort('Invalid disk specification.'\n\t\t\t\t' Please see rocks add host vm help.')\n\n\tdict['device'] = d[1]\n\tdict['mode'] = d[2]\n\n\te = d[0].split(':')\n\tdict['vbd_type'] = ':'.join(e[0:-1])\n\n\tif dict['vbd_type'] == 'phy':\n\t\tdict['prefix'] = ''\n\t\tdict['name'] = e[-1]\t# allows for '/' in name for LVM\n\telse:\n\t\tdict['prefix'] = os.path.dirname(e[-1])\n\t\tdict['name'] = os.path.basename(e[-1])\n\n\treturn dict",
"def getDisk(self, item):\n return self.disks[item]",
"def get_disk(disk_name: Optional[str] = None,\n resource_group_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDiskResult:\n __args__ = dict()\n __args__['diskName'] = disk_name\n __args__['resourceGroupName'] = resource_group_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:compute/v20230402:getDisk', __args__, opts=opts, typ=GetDiskResult).value\n\n return AwaitableGetDiskResult(\n bursting_enabled=pulumi.get(__ret__, 'bursting_enabled'),\n bursting_enabled_time=pulumi.get(__ret__, 'bursting_enabled_time'),\n completion_percent=pulumi.get(__ret__, 'completion_percent'),\n creation_data=pulumi.get(__ret__, 'creation_data'),\n data_access_auth_mode=pulumi.get(__ret__, 'data_access_auth_mode'),\n disk_access_id=pulumi.get(__ret__, 'disk_access_id'),\n disk_iops_read_only=pulumi.get(__ret__, 'disk_iops_read_only'),\n disk_iops_read_write=pulumi.get(__ret__, 'disk_iops_read_write'),\n disk_m_bps_read_only=pulumi.get(__ret__, 'disk_m_bps_read_only'),\n disk_m_bps_read_write=pulumi.get(__ret__, 'disk_m_bps_read_write'),\n disk_size_bytes=pulumi.get(__ret__, 'disk_size_bytes'),\n disk_size_gb=pulumi.get(__ret__, 'disk_size_gb'),\n disk_state=pulumi.get(__ret__, 'disk_state'),\n encryption=pulumi.get(__ret__, 'encryption'),\n encryption_settings_collection=pulumi.get(__ret__, 'encryption_settings_collection'),\n extended_location=pulumi.get(__ret__, 'extended_location'),\n hyper_v_generation=pulumi.get(__ret__, 'hyper_v_generation'),\n id=pulumi.get(__ret__, 'id'),\n last_ownership_update_time=pulumi.get(__ret__, 'last_ownership_update_time'),\n location=pulumi.get(__ret__, 'location'),\n managed_by=pulumi.get(__ret__, 'managed_by'),\n managed_by_extended=pulumi.get(__ret__, 'managed_by_extended'),\n max_shares=pulumi.get(__ret__, 'max_shares'),\n name=pulumi.get(__ret__, 'name'),\n network_access_policy=pulumi.get(__ret__, 'network_access_policy'),\n optimized_for_frequent_attach=pulumi.get(__ret__, 'optimized_for_frequent_attach'),\n os_type=pulumi.get(__ret__, 'os_type'),\n property_updates_in_progress=pulumi.get(__ret__, 'property_updates_in_progress'),\n provisioning_state=pulumi.get(__ret__, 'provisioning_state'),\n public_network_access=pulumi.get(__ret__, 'public_network_access'),\n purchase_plan=pulumi.get(__ret__, 'purchase_plan'),\n security_profile=pulumi.get(__ret__, 'security_profile'),\n share_info=pulumi.get(__ret__, 'share_info'),\n sku=pulumi.get(__ret__, 'sku'),\n supported_capabilities=pulumi.get(__ret__, 'supported_capabilities'),\n supports_hibernation=pulumi.get(__ret__, 'supports_hibernation'),\n tags=pulumi.get(__ret__, 'tags'),\n tier=pulumi.get(__ret__, 'tier'),\n time_created=pulumi.get(__ret__, 'time_created'),\n type=pulumi.get(__ret__, 'type'),\n unique_id=pulumi.get(__ret__, 'unique_id'),\n zones=pulumi.get(__ret__, 'zones'))",
"def GetDisk(self, disk_name: str) -> 'AZComputeDisk':\n disks = self.ListDisks()\n if disk_name not in disks:\n raise errors.ResourceNotFoundError(\n 'Disk {0:s} was not found in instance {1:s}'.format(\n disk_name, self.resource_id), __name__)\n return disks[disk_name]",
"def import_disk(\n self,\n backend_vm_id,\n backend_disk_id,\n save=True,\n project=None,\n ):\n try:\n backend_disk = self.client.get_disk(backend_vm_id, backend_disk_id)\n except VMwareError as e:\n raise VMwareBackendError(e)\n\n disk = self._backend_disk_to_disk(backend_disk, backend_disk_id)\n disk.service_settings = self.settings\n disk.project = project\n if save:\n disk.save()\n\n return disk",
"def ParseDiskResourceFromAttachedDisk(resources, attached_disk):\n try:\n disk = resources.Parse(\n attached_disk.source, collection='compute.regionDisks')\n if disk:\n return disk\n except (cloud_resources.WrongResourceCollectionException,\n cloud_resources.RequiredFieldOmittedException):\n pass\n\n try:\n disk = resources.Parse(attached_disk.source, collection='compute.disks')\n if disk:\n return disk\n except (cloud_resources.WrongResourceCollectionException,\n cloud_resources.RequiredFieldOmittedException):\n pass\n\n raise cloud_resources.InvalidResourceException('Unable to parse [{}]'.format(\n attached_disk.source))",
"def prepare_disk_attachment_object(disk_id=None, **kwargs):\n disk = kwargs.pop(\"disk\", None)\n disk_obj = disk if disk else prepare_ds_object(\"Disk\", id=disk_id)\n return prepare_ds_object(\"DiskAttachment\", disk=disk_obj, **kwargs)",
"def _get_system_volume(vm_):\n\n # Override system volume size if 'disk_size' is defined in cloud profile\n disk_size = get_size(vm_)[\"disk\"]\n if \"disk_size\" in vm_:\n disk_size = vm_[\"disk_size\"]\n\n # Construct the system volume\n volume = Volume(\n name=\"{} Storage\".format(vm_[\"name\"]),\n size=disk_size,\n disk_type=get_disk_type(vm_),\n )\n\n if \"image_password\" in vm_:\n image_password = vm_[\"image_password\"]\n volume.image_password = image_password\n\n # Retrieve list of SSH public keys\n ssh_keys = get_public_keys(vm_)\n volume.ssh_keys = ssh_keys\n\n if \"image_alias\" in vm_.keys():\n volume.image_alias = vm_[\"image_alias\"]\n else:\n volume.image = get_image(vm_)[\"id\"]\n # Set volume availability zone if defined in the cloud profile\n if \"disk_availability_zone\" in vm_:\n volume.availability_zone = vm_[\"disk_availability_zone\"]\n\n return volume",
"def create_fs_on_disk(vm_name, disk_alias, executor=None):\n if ll_vms.get_vm_state(vm_name) == config.VM_DOWN:\n ll_vms.startVm(\n True, vm_name, wait_for_status=config.VM_UP,\n wait_for_ip=True\n )\n if not executor:\n executor = get_vm_executor(vm_name)\n\n logger.info(\n \"Find disk logical name for disk with alias %s on vm %s\",\n disk_alias, vm_name\n )\n disk_logical_volume_name = get_logical_name_by_vdsm_client(\n vm_name, disk_alias\n )\n if not disk_logical_volume_name:\n # This function is used to test whether logical volume was found,\n # raises an exception if it wasn't found\n message = \"Failed to get %s disk logical name\" % disk_alias\n logger.error(message)\n return False, message\n\n logger.info(\n \"The logical volume name for the requested disk is: '%s'\",\n disk_logical_volume_name\n )\n\n logger.info(\n \"Creating label: %s\", CREATE_DISK_LABEL_CMD % disk_logical_volume_name\n )\n rc, out, _ = executor.run_cmd(\n (CREATE_DISK_LABEL_CMD % disk_logical_volume_name).split()\n )\n logger.info(\"Output after creating disk label: %s\", out)\n if rc:\n return rc, out\n logger.info(\n \"Creating partition %s\",\n CREATE_DISK_PARTITION_CMD % disk_logical_volume_name\n )\n rc, out, _ = executor.run_cmd(\n (CREATE_DISK_PARTITION_CMD % disk_logical_volume_name).split()\n )\n logger.info(\"Output after creating partition: %s\", out)\n if rc:\n return rc, out\n # '1': create the fs as the first partition\n # '?': createFileSystem will return a random mount point\n logger.info(\"Creating a File-system on first partition\")\n mount_point = create_filesystem(\n vm_name=vm_name, device=disk_logical_volume_name, partition='1',\n fs=FILESYSTEM, executor=executor\n )\n return True, mount_point",
"def test_disk(self):\n self.command.package = self.input_ovf\n self.command.file_id = \"file1\"\n self.command.run()\n self.command.finished()\n self.check_diff(\"\"\"\n <ovf:References>\n- <ovf:File ovf:href=\"input.vmdk\" ovf:id=\"file1\" ovf:size=\"{vmdk_size}\" />\n <ovf:File ovf:href=\"input.iso\" ovf:id=\"file2\" ovf:size=\"{iso_size}\" />\n...\n <ovf:Info>Virtual disk information</ovf:Info>\n- <ovf:Disk ovf:capacity=\"1\" ovf:capacityAllocationUnits=\"byte * 2^30\" \\\novf:diskId=\"vmdisk1\" ovf:fileRef=\"file1\" ovf:format=\"http://www.vmware.com/\\\ninterfaces/specifications/vmdk.html#streamOptimized\" />\n </ovf:DiskSection>\n...\n <rasd:AddressOnParent>0</rasd:AddressOnParent>\n- <rasd:ElementName>Hard Drive</rasd:ElementName>\n- <rasd:HostResource>ovf:/disk/vmdisk1</rasd:HostResource>\n- <rasd:InstanceID>6</rasd:InstanceID>\n- <rasd:Parent>3</rasd:Parent>\n- <rasd:ResourceType>17</rasd:ResourceType>\n- </ovf:Item>\n- <ovf:Item>\n- <rasd:AddressOnParent>0</rasd:AddressOnParent>\n <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>\n\"\"\".format(vmdk_size=self.FILE_SIZE['input.vmdk'],\n iso_size=self.FILE_SIZE['input.iso']))\n self.assertFalse(os.path.exists(os.path.join(self.temp_dir,\n \"input.vmdk\")),\n \"deleted file should not be exported\")",
"def disk(self):\n return self.__disk",
"def get_disk_type(vm_):\n return config.get_cloud_config_value(\n \"disk_type\", vm_, __opts__, default=\"HDD\", search_global=False\n )",
"def detachDiskFromMinipad(self , disk):\n return",
"def list_vdisks(client, resource_group_name, vm_name):\n virtual_machine = client.get(resource_group_name, vm_name)\n return virtual_machine.disks",
"def detach_disk_from_vm(self, vm_ref, instance_name, device):\n client_factory = self._session._get_vim().client.factory\n vmdk_detach_config_spec = vm_util.get_vmdk_detach_config_spec(\n client_factory, device)\n disk_key = device.key\n LOG.debug(_(\"Reconfiguring VM instance %(instance_name)s to detach \"\n \"disk %(disk_key)s\") % locals())\n reconfig_task = self._session._call_method(\n self._session._get_vim(),\n \"ReconfigVM_Task\", vm_ref,\n spec=vmdk_detach_config_spec)\n self._session._wait_for_task(instance_name, reconfig_task)\n LOG.debug(_(\"Reconfigured VM instance %(instance_name)s to detach \"\n \"disk %(disk_key)s\") % locals())",
"def GetDisk(\n self,\n disk_name: str,\n resource_group_name: Optional[str] = None) -> 'AZComputeDisk':\n disks = self.ListDisks(resource_group_name=resource_group_name)\n if disk_name not in disks:\n raise errors.ResourceNotFoundError(\n 'Disk {0:s} was not found in subscription {1:s}'.format(\n disk_name, self.az_account.subscription_id), __name__)\n return disks[disk_name]",
"def get_os_virtual_hard_disk(self):\n if self.is_vm_image():\n return None\n i = self.virtual_environment[self.T_I]\n sa = self.virtual_environment[self.T_SA]\n c = self.virtual_environment[self.T_C]\n now = datetime.datetime.now()\n blob = self.BLOB_BASE % (i[self.T_I_N],\n str(now.year),\n str(now.month),\n str(now.day),\n str(now.hour),\n str(now.minute),\n str(now.second),\n str(current_thread().ident))\n media_link = self.MEDIA_BASE % (sa[self.T_SA_SN],\n sa[self.T_SA_UB],\n c,\n blob)\n os_virtual_hard_disk = OSVirtualHardDisk(i[self.T_I_N], media_link)\n return os_virtual_hard_disk",
"def get_disk_details(disk):\n details = {}\n script = [\n 'select disk {}'.format(disk['Number']),\n 'detail disk']\n\n # Run\n try:\n result = run_diskpart(script)\n except subprocess.CalledProcessError:\n pass\n else:\n output = result.stdout.decode().strip()\n # Remove empty lines\n tmp = [s.strip() for s in output.splitlines() if s.strip() != '']\n # Set disk name\n details['Name'] = tmp[4]\n # Split each line on ':' skipping those without ':'\n tmp = [s.split(':') for s in tmp if ':' in s]\n # Add key/value pairs to the details variable and return dict\n details.update({key.strip(): value.strip() for (key, value) in tmp})\n\n return details",
"def get_disk(self, instance=None, data=None, **kwargs):\n instance = instance if instance else self.instance\n boot_from_volume = data['boot_from_volume'] if data else self.data['boot_from_volume']\n is_ephemeral = self.__get_flavor_from_instance(instance).ephemeral > 0\n\n if not boot_from_volume:\n if self.config[\"ephemeral_drives\"]['ceph']:\n diff_path = self.__get_instance_diff_path(instance, False, True)\n ephemeral = self.__get_instance_diff_path(instance, True, True) if is_ephemeral else None\n self.__create_temp_directory(self.config['temp'])\n self.data['disk'] = {\n 'type': CEPH,\n 'host': self.config['host'],\n 'diff_path': self.__transfer_rbd_to_glance(diff_path,\n self.config['temp'],\n self.config['ephemeral_drives']['convert_diff_file'],\n \"diff_path\"),\n 'ephemeral': self.__transfer_rbd_to_file(ephemeral,\n self.config['temp'],\n self.config['ephemeral_drives']['convert_ephemeral_drive'],\n \"disk.local\")\n }\n else:\n diff_path = self.__get_instance_diff_path(instance, False, False)\n ephemeral = self.__get_instance_diff_path(instance, True, False) if is_ephemeral else None\n self.data['disk'] = {\n 'type': REMOTE_FILE,\n 'host': getattr(instance, 'OS-EXT-SRV-ATTR:host'),\n 'diff_path': diff_path,\n 'ephemeral': ephemeral\n }\n else:\n ephemeral = self.__get_instance_diff_path(instance, True, self.config[\"ephemeral_drives\"]['ceph']) \\\n if is_ephemeral else None\n self.__create_temp_directory(self.config['temp'])\n self.data['disk'] = {\n 'type': CEPH if self.config[\"ephemeral_drives\"]['ceph'] else REMOTE_FILE,\n 'host': self.config['host'] if self.config[\"ephemeral_drives\"]['ceph']\n else getattr(instance, 'OS-EXT-SRV-ATTR:host'),\n 'ephemeral': self.__transfer_rbd_to_file(ephemeral,\n self.config['temp'],\n self.config['ephemeral_drives']['convert_ephemeral_drive'],\n \"disk.local\")\n if self.config[\"ephemeral_drives\"]['ceph'] else ephemeral\n }\n self.data[\"boot_volume_size\"] = {}\n return self",
"def launch_vm(vm_id, vm_metadata):\n print('\\nCreating disk and vm with ID:', vm_id)\n vm_metadata['vm_id'] = vm_id\n ram_mbs, num_cpus, num_gpus = required_resources_for_method(\n vm_metadata['method'],\n bool(vm_metadata['pretrained_r_nets_path']))\n\n create_disk_cmd = (\n 'gcloud compute disks create '\n '\"{disk_name}\" --zone \"{zone}\" --source-snapshot \"{source_snapshot}\" '\n '--type \"pd-standard\" --project=\"{gcloud_project}\" '\n '--size=200GB'.format(\n disk_name=vm_id,\n zone=ZONE,\n source_snapshot=SOURCE_SNAPSHOT,\n gcloud_project=GCLOUD_PROJECT,\n ))\n print('Calling', create_disk_cmd)\n # Don't fail if disk already exists.\n subprocess.call(create_disk_cmd, shell=True)\n\n create_instance_cmd = (\n 'gcloud compute --project={gcloud_project} instances create '\n '{instance_name} --zone={zone} --machine-type={machine_type} '\n '--subnet=default --network-tier=PREMIUM --maintenance-policy=TERMINATE '\n '--service-account={service_account} '\n '--scopes=storage-full,compute-rw '\n '--accelerator=type=nvidia-tesla-p100,count={gpu_count} '\n '--disk=name={disk_name},device-name={disk_name},mode=rw,boot=yes,'\n 'auto-delete=yes --restart-on-failure '\n '--metadata-from-file startup-script=./scripts/vm_drop_root.sh '\n '--metadata {vm_metadata} --async'.format(\n instance_name=vm_id,\n zone=ZONE,\n machine_type='custom-{num_cpus}-{ram_mbs}'.format(\n num_cpus=num_cpus, ram_mbs=ram_mbs),\n gpu_count=num_gpus,\n disk_name=vm_id,\n vm_metadata=(\n ','.join('{}={}'.format(k, v) for k, v in vm_metadata.items())),\n gcloud_project=GCLOUD_PROJECT,\n service_account=SERVICE_ACCOUNT,\n ))\n\n print('Calling', create_instance_cmd)\n subprocess.check_call(create_instance_cmd, shell=True)",
"def create_disk_instance(device, disk_params):\n\n domain_name = device[\"name\"]\n disk_instance_path = \"\"\n\n if \"type\" in disk_params:\n if disk_params[\"type\"] == \"image\" and \"image_id\" in disk_params:\n logger.debug(\"Creating secondary/tertiary Disk information\")\n image_id = disk_params[\"image_id\"]\n disk_image = Image.objects.get(pk=image_id)\n disk_base_path = settings.MEDIA_ROOT + \"/\" + disk_image.filePath.url\n\n disk_instance_path = osUtils.get_instance_path_from_image(disk_base_path,\n domain_name + \"_secondary_image.img\"\n )\n\n if not osUtils.check_path(disk_instance_path):\n if not osUtils.create_thin_provision_instance(disk_base_path,\n domain_name + \"_secondary_image.img\"\n ):\n raise Exception(\"Could not create image instance for image: \" + disk_base_path)\n\n elif disk_params[\"type\"] == \"blank\":\n disk_instance_path = settings.MEDIA_ROOT \\\n + \"/user_images/instances/\" + domain_name + \"_secondary_blank.img\"\n\n disk_size = \"16G\"\n if \"size\" in disk_params:\n disk_size = disk_params[\"size\"]\n\n if not osUtils.check_path(disk_instance_path):\n if not osUtils.create_blank_image(disk_instance_path, disk_size):\n raise Exception(\"Could not create image instance for image: \" + disk_instance_path)\n\n elif disk_params[\"type\"] == \"config_drive\":\n # let's check if config_drive is supported for this vm_type!\n # this is usually used for vMX in openstack, however, we can also use it here for KVM deployments\n disk_instance_path = ''\n if \"configDriveSupport\" in device and device[\"configDriveSupport\"] is True:\n\n logger.debug(\"Lets create a config-drive!\")\n\n # keep a dict of files with format: filename: filecontents\n files = dict()\n params = device[\"configDriveParams\"]\n if \"configDriveParamsFile\" in device and device[\"configDriveParamsFile\"]:\n logger.debug(\"Using inline config_drive format\")\n # behavior change 12-28-2016 - allow passing a list of templates and destinations\n # instead of defining the params directly on the device object\n # if the configDriveParams is a dict, then this is an older topology, leave this code here\n # to still support them - otherwise fall through to the isinstance check for list type for\n # newer style configuration\n if isinstance(params, dict):\n name = device[\"configDriveParamsFile\"]\n file_data = \"\"\n # config drive params are usually a dict - to make json serialization easier\n # for our purposes here, let's just make a file with a single key: value per line\n # note, we can add a serialization format to the vm_type.js if needed here\n # only currently used for /boot/loader.conf in vmx and riot\n for k in params:\n file_data += '%s=\"%s\"\\n' % (k, params[k])\n\n files[name] = file_data\n\n # junos customization\n # let's also inject a default config here as well if possible!\n if \"junos\" in device[\"type\"]:\n logger.debug(\"Creating Junos configuration template\")\n junos_config = osUtils.get_junos_default_config_template(device[\"name\"],\n device[\"label\"],\n device[\"password\"],\n device[\"ip\"],\n device[\"managementInterface\"])\n\n if junos_config is not None:\n files[\"/juniper.conf\"] = junos_config\n\n # check for new (12-28-2016) style config drive params definition\n if isinstance(params, list):\n logger.debug(\"params is a list\")\n for p in params:\n if \"template\" in p and \"destination\" in p:\n file_data = None\n file_data = osUtils.compile_config_drive_params_template(\n p[\"template\"],\n device[\"name\"],\n device[\"label\"],\n device[\"password\"],\n 
device[\"ip\"],\n device[\"managementInterface\"]\n )\n if file_data is not None:\n files[p[\"destination\"]] = file_data\n\n disk_instance_path = osUtils.create_config_drive(device[\"name\"], files)\n if disk_instance_path is None:\n disk_instance_path = ''\n\n logger.debug(\"Using %s\" % disk_instance_path)\n return disk_instance_path"
] | [
"0.7119601",
"0.7092368",
"0.70414054",
"0.6679714",
"0.6640973",
"0.650309",
"0.6392032",
"0.6355302",
"0.629178",
"0.6273012",
"0.6256461",
"0.6222913",
"0.62218547",
"0.6218388",
"0.618372",
"0.61645603",
"0.61220926",
"0.6121833",
"0.609453",
"0.6032805",
"0.60152584",
"0.6008411",
"0.60057914",
"0.5972369",
"0.5967228",
"0.5940259",
"0.59225357",
"0.5911856",
"0.5896098",
"0.588873"
] | 0.769929 | 0 |
Returns disk from template collection | def getTemplateDisk(template_name, alias):
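    # Fetch the template's disk objects (not an href) and return the one whose alias matches.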
template_disks = getObjDisks(
template_name, get_href=False, is_template=True
)
for template_disk in template_disks:
if alias == template_disk.get_alias():
return template_disk
raise EntityNotFound(
"Didn't find disk %s for template %s" % (alias, template_name)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def template_storage_and_column():\r\n root = join(dirname(__file__), \"app\", \"templates\")\r\n storage = FileSystemStorage(location=root, base_url=\"/baseurl/\")\r\n column = tables.FileColumn(attrs={\"span\": {\"class\": \"span\"},\r\n \"a\": {\"class\": \"a\"}})\r\n yield column, storage",
"def save_path(self):\n return self.template.manager.render_template_txt(self.path, self.template)",
"def template():\n\n return s3_rest_controller(rheader = s3db.dc_rheader)",
"def getFile(self):\n #try to redetect the filetype\n vim.command(\"filetype detect\")\n #return the filetype\n filetype = vim.eval(\"&ft\")\n #filetype = vim.command(\"&ft\")\n if filetype:\n for file in self.template_files:\n if filetype.lower() in file.lower():\n self.hasTemplate = True\n return open(self.template_folder + \"/\" + file, 'r')\n return None",
"def disk_fxt(request):\n disk = request.param\n disk.download()\n return disk",
"def template():\n return ENVIVIRTUALIZABLEURI('DEFile')",
"def get_wrapper_template():\n\n return wrapper_templates.template_collection",
"def get_template_files(fs, template_type):\n # no template fitting for null runs\n if fs[\"null_run\"]:\n template_type = None\n\n if \"template_type\" in fs:\n if template_type == fs[\"template_type\"]:\n return\n\n fs[\"template_type\"] = template_type\n\n # find all corresponding foreground templates\n if template_type is None:\n fs[\"template_root\"] = None\n fs[\"template_root2\"] = None\n fs[\"template_files\"] = None\n fs[\"template_files2\"] = None\n fs[\"template_noise_root\"] = None\n fs[\"template_noise_root2\"] = None\n fs[\"template_noise_files\"] = None\n fs[\"template_noise_files2\"] = None\n fs[\"num_template\"] = 0\n fs[\"num_template_noise\"] = 0\n else:\n num_template_noise = None\n for hm in [\"1\", \"2\"]:\n suff = \"\" if hm == \"1\" else \"2\"\n troot = os.path.join(\n fs[\"data_root\"],\n \"templates_{}\".format(template_type),\n \"halfmission-{}\".format(hm),\n )\n ### this block is so sims with template type like\n # 353_100_gauss_003 can use ensemble in 353_100_gauss\n tp = template_type.split(\"_\")\n ttype = template_type\n if tp[-1].isdigit():\n if ttype[-7:] not in [\"353_100\", \"217_100\"]:\n ttype = \"_\".join(tp[:-1])\n\n tnroot = os.path.join(\n fs[\"data_root\"],\n \"templates_noise_{}\".format(ttype),\n \"halfmission-{}\".format(hm),\n )\n\n tfiles = []\n tnfiles = []\n for f in fs[\"map_files\"]:\n nfile = f.replace(fs[\"map_root\"], troot)\n if not os.path.exists(nfile):\n raise OSError(\"Missing hm-{} template for {}\".format(hm, f))\n tfiles.append(nfile)\n nfiles = sorted(\n glob.glob(\n f.replace(fs[\"map_root\"], tnroot).replace(\n \".fits\", \"_*.fits\"\n )\n )\n )\n if not len(nfiles):\n raise OSError(\n \"Missing hm-{} template noise for {}\".format(hm, f)\n )\n tnfiles.append(nfiles)\n if num_template_noise is not None:\n if len(nfiles) != num_template_noise:\n raise OSError(\n \"Wrong number of template noise sims. \"\n \"Found {} files, expected {}.\".format(\n len(nfiles), num_template_noise\n )\n )\n\n num_template_noise = len(nfiles)\n\n tfiles = np.asarray(tfiles)\n tnfiles = np.asarray(tnfiles)\n fs[\"template_root{}\".format(suff)] = troot\n fs[\"template_files{}\".format(suff)] = tfiles\n fs[\"template_noise_root{}\".format(suff)] = tnroot\n fs[\"template_noise_files{}\".format(suff)] = tnfiles\n\n fs[\"num_template\"] = len(fs[\"template_files\"])\n fs[\"num_template_noise\"] = num_template_noise\n self.log(\n \"Found {} templates in {}\".format(\n fs[\"num_template\"], fs[\"template_root\"]\n ),\n \"info\",\n )\n self.log(\n \"Found {} template noise files in {}\".format(\n fs[\"num_template_noise\"], fs[\"template_noise_root\"]\n ),\n \"info\",\n )\n self.log(\"Template files: {}\".format(fs[\"template_files\"]), \"debug\")\n\n fields = [\n \"template_type\",\n \"template_root\",\n \"template_root2\",\n \"template_files\",\n \"template_files2\",\n \"template_noise_root\",\n \"template_noise_root2\",\n \"template_noise_files\",\n \"template_noise_files2\",\n \"num_template\",\n \"num_template_noise\",\n ]\n for k in fields:\n setattr(self, k, fs[k])",
"def select(self, container: FileContainer) -> FileCollection:",
"def get_document(self):\n\t\tif(self.fs.tmp_dir):\n\t\t\tfull_filename = self.fs.tmp_dir + os.sep + self.fs.get_document()\n\t\telse:\n\t\t\tfull_filename = self.fs.get_document()\n\t\t\n\t\treturn full_filename",
"def simple_files_data(tmpdir):\n return simple(tmpdir)[\"data\"]",
"def get_template(self, template):\n\n\n env = Environment(\n loader=FileSystemLoader('templates')\n )\n return env.get_template(template)",
"def _simple_files(tmpdir):\n return simple(tmpdir)[\"files\"]",
"def fusion_api_get_storage_volumes_template(self, uri=None, param='', api=None, headers=None):\n return self.template.get(uri=uri, api=api, headers=headers, param=param)",
"def get_default_template(env):\n return env.from_string(\n \"\"\"\\\n{% if record.standard_information and record.filename_information %}\n0|{{ prefix }}{{ record.path }}|{{ record.inode }}|0|{{ record.standard_information.owner_id }}|0|{{ record.size }}|{{ record.standard_information.accessed|unixtimestampformat }}|{{ record.standard_information.modified|unixtimestampformat }}|{{ record.standard_information.changed|unixtimestampformat }}|{{ record.standard_information.created|unixtimestampformat }}\n{% endif %}\n{% if record.standard_information and record.filename_information %}\n0|{{ prefix }}{{ record.path }} (filename)|{{ record.inode }}|0|{{ record.standard_information.owner_id }}|0|{{ record.size }}|{{ record.filename_information.accessed|unixtimestampformat }}|{{ record.filename_information.modified|unixtimestampformat }}|{{ record.filename_information.changed|unixtimestampformat }}|{{ record.filename_information.created|unixtimestampformat }}\n{% endif %}\n{% for e in record.indx_entries %}\n0|{{ prefix }}{{ record.path }}\\\\{{ e.name }} (INDX)|{{ e.inode }}|0|0|0|{{ e.logical_size }}|{{ e.accessed|unixtimestampformat }}|{{ e.modified|unixtimestampformat }}|{{ e.changed|unixtimestampformat }}|{{ e.created|unixtimestampformat }}\n{% endfor %}\n{% for e in record.slack_indx_entries %}\n0|{{ prefix }}{{ record.path }}\\\\{{ e.name }} (slack-INDX)|{{ e.inode }}|0|0|0|{{ e.logical_size }}|{{ e.accessed|unixtimestampformat }}|{{ e.modified|unixtimestampformat }}|{{ e.changed|unixtimestampformat }}|{{ e.created|unixtimestampformat }}\n{% endfor %}\n\"\"\"\n )",
"def get_template(type):\n # read model options file from Cloud Storage\n content = storage.read_file('templates/' + type + '.yaml')\n return Response(content, status=200, mimetype='application/text')",
"def get_templates(self):\n\n\t\tif not os.path.isdir('./repo'): os.mkdir('./repo')\n\t\ttemps = self.settings['template']\n\t\t#---ensure that the template object is always in a list\n\t\tif len(temps) == 2 and type(temps[0])==str and type(temps[1])==str: temps = [temps]\n\t\tself.template = []\n\t\tfor t in temps:\n\t\t\tprint 'retrieving '+str(t[0])\n\t\t\t#---check if in repo and move\n\t\t\tif not os.path.isfile(self.rootdir+t[0]+'.pdb') and os.path.isfile('./repo/'+t[0]+'.pdb'):\n\t\t\t\tcopy('./repo/'+t[0]+'.pdb',self.rootdir+t[0]+'.pdb')\n\t\t\t\t#---fasta retrieval is deprecated\n\t\t\t\tif 0: copy('./repo/'+t[0]+'.fasta',self.rootdir+t[0]+'.fasta')\n\t\t\telif not os.path.isfile(self.rootdir+t[0]+'.pdb'):\n\t\t\t\tresponse = urllib2.urlopen('http://www.rcsb.org/pdb/files/'+t[0]+'.pdb')\n\t\t\t\tpdbfile = response.read()\n\t\t\t\twith open(self.rootdir+t[0]+'.pdb','w') as fp: fp.write(pdbfile)\n\t\t\t\tcopy(self.rootdir+t[0]+'.pdb','./repo/'+t[0]+'.pdb')\n\t\t\tself.template.append(t)",
"def GetTemplate(self, _page_data):\n return self.template",
"def _GetDiskContents(self, layer):\n # type: (Sdf.Layer) -> str\n # with USD Issue #253 solved, we can do a cheaper check of just\n # comparing time stamps and getting contents only if needed.\n\n if not layer.realPath:\n # New() or anonymous layer that cant be loaded from disk.\n return None\n\n # TODO: Is it safe to ChangeBlock this content swapping?\n currentContents = layer.ExportToString()\n # fetch on disk contents for comparison\n layer.Reload()\n diskContents = layer.ExportToString()\n # but then restore users edits\n if diskContents != currentContents:\n layer.ImportFromString(currentContents)\n return diskContents",
"def get_input_contents(self):\n try:\n ret_files = []\n coll = self.collections[self._primary_input_collection]\n ret_file = {'coll_id': coll['coll_id'],\n 'scope': coll['scope'],\n 'name': coll['name'],\n 'bytes': coll.coll_metadata['bytes'],\n 'adler32': None,\n 'min_id': 0,\n 'max_id': coll.coll_metadata['total_files'],\n 'content_type': ContentType.File,\n 'content_metadata': {'total_files': coll['coll_metadata']['total_files']}\n }\n ret_files.append(ret_file)\n return ret_files\n except Exception as ex:\n self.logger.error(ex)\n self.logger.error(traceback.format_exc())\n raise exceptions.IDDSException('%s: %s' % (str(ex), traceback.format_exc()))",
"def template(self):\n with open(self.compute.submission_template, \"r\") as f:\n return f.read()",
"def get_template(self, name):\n with open(name, 'r+') as open_f:\n template_content = open_f.read()\n return template_content",
"def extract_template(temp_dir, fea_type):\n kps = []\n descriptors = np.array([])\n in_path = temp_dir + 'imgs/' # images\n names = os.listdir(in_path)\n for i, name in enumerate(names):\n img = cv2.imread(in_path + name, 0)\n if any(np.array(img.shape) > 1000):\n img = cv2.resize(img, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)\n print(img.shape)\n kp, des = get_des(fea_type, img)\n if descriptors.size == 0:\n kps = kp\n descriptors = des\n else:\n kps.extend(kp)\n descriptors = np.vstack((descriptors, des))\n\n print(\"template descriptors shape: \" + str(descriptors.shape))\n with open(temp_dir + fea_type + '_template_0.pickle', 'wb') as ff:\n pickle.dump(descriptors, ff)\n\n # with open(temp_dir + fea_type + '_template_0.pickle', 'rb') as f:\n # template = pickle.load(f)\n\n return",
"def _get_template_data(vm_data: Dict[str, Any], span: Span) -> Optional[Dict[str, Any]]:\n vm_id = vm_data['id']\n Windows.logger.debug(f'Compiling template data for VM #{vm_id}')\n data: Dict[str, Any] = {key: None for key in Windows.template_keys}\n\n data['vm_identifier'] = f'{vm_data[\"project\"][\"id\"]}_{vm_id}'\n data['image_answer_file_name'] = vm_data['image']['answer_file_name']\n\n data['image_filename'] = vm_data['image']['filename']\n # check if file exists at /mnt/images/HyperV/VHDXs/\n path = '/mnt/images/HyperV/VHDXs/'\n child_span = opentracing.tracer.start_span('vm_image_file_download', child_of=span)\n if not Windows.check_image(data['image_filename'], path):\n # download the file\n downloaded, errors = Windows.download_image(data['image_filename'], path)\n if not downloaded:\n for error in errors:\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n child_span.finish()\n\n # RAM is needed in MB for the builder but we take it in in GB (1024, not 1000)\n data['ram'] = vm_data['ram'] * 1024\n data['cpu'] = vm_data['cpu']\n data['dns'] = vm_data['dns']\n\n # Generate encrypted passwords\n data['admin_password'] = Windows._password_generator(size=12)\n # Also save the password back to the VM data dict\n vm_data['admin_password'] = data['admin_password']\n\n # Check for the primary storage\n if not any(storage['primary'] for storage in vm_data['storages']):\n error = 'No primary storage drive found. Expected one primary storage drive'\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n\n data['storages'] = vm_data['storages']\n data['storage_type'] = vm_data['storage_type']\n\n # Get the Networking details\n data['vlans'] = []\n data['ip_addresses'] = []\n data['default_ips'] = []\n data['default_gateway'] = ''\n data['default_netmask_int'] = ''\n data['default_vlan'] = ''\n\n # The private IPs for the VM will be the one we need to pass to the template\n vm_data['ip_addresses'].reverse()\n ip_addresses = []\n subnets = []\n for ip in vm_data['ip_addresses']:\n if IPAddress(ip['address']).is_private():\n ip_addresses.append(ip)\n subnets.append({\n 'address_range': ip['subnet']['address_range'],\n 'vlan': ip['subnet']['vlan'],\n 'id': ip['subnet']['id'],\n })\n # Removing duplicates\n subnets = [dict(tuple_item) for tuple_item in {tuple(subnet.items()) for subnet in subnets}]\n # sorting nics (each subnet is one nic)\n for subnet in subnets:\n non_default_ips = []\n gateway, netmask_int = subnet['address_range'].split('/')\n vlan = str(subnet['vlan'])\n data['vlans'].append(vlan)\n\n for ip_address in ip_addresses:\n address = ip_address['address']\n if ip_address['subnet']['id'] == subnet['id']:\n # Pick the default ips if any\n if vm_data['gateway_subnet'] is not None:\n if subnet['id'] == vm_data['gateway_subnet']['id']:\n data['default_ips'].append(address)\n data['default_gateway'] = gateway\n data['default_netmask_int'] = netmask_int\n data['default_vlan'] = vlan\n continue\n # else store the non gateway subnet ips\n non_default_ips.append(address)\n\n if len(non_default_ips) > 0:\n data['ip_addresses'].append({\n 'ips': non_default_ips,\n 'gateway': gateway,\n 'netmask_int': netmask_int,\n 'vlan': vlan,\n })\n\n # Add locale data to the VM\n data['language'] = 'en_IE'\n data['timezone'] = 'GMT Standard Time'\n\n # Get the host name of the server\n host_name = None\n for interface in vm_data['server_data']['interfaces']:\n if interface['enabled'] is True and interface['ip_address'] is not None:\n if 
IPAddress(str(interface['ip_address'])).version == 6:\n host_name = interface['hostname']\n break\n if host_name is None:\n error = f'Host name is not found for the server # {vm_data[\"server_id\"]}'\n Windows.logger.error(error)\n vm_data['errors'].append(error)\n return None\n\n # Add the host information to the data\n data['host_name'] = host_name\n data['network_drive_url'] = settings.NETWORK_DRIVE_URL\n data['vms_path'] = settings.HYPERV_VMS_PATH\n\n return data",
"def disk(self):\n return self.__disk",
"def fixture_retrieved():\n from aiida.plugins import DataFactory\n from aiida_logger.tests import TEST_DIR\n\n retrieved = DataFactory('folder')()\n retrieved.put_object_from_tree(path=os.path.join(TEST_DIR, 'input_files'))\n\n return retrieved",
"def get_template(self):\n if not self.cache:\n self.close()\n return self.cache",
"def disk(self):\n return self._context.get(\"disk\", None)",
"def _load_disk(self):",
"def _load_disk(self):"
] | [
"0.56004196",
"0.5590748",
"0.5581125",
"0.5569269",
"0.5568401",
"0.5486315",
"0.5423215",
"0.5413344",
"0.5405264",
"0.5395757",
"0.53775305",
"0.5358232",
"0.535637",
"0.5337047",
"0.5311859",
"0.52786845",
"0.5273297",
"0.5271104",
"0.5260368",
"0.5256782",
"0.5250343",
"0.5241731",
"0.5231731",
"0.52031183",
"0.5201769",
"0.51704717",
"0.5140401",
"0.5135287",
"0.5119547",
"0.5119547"
] | 0.5808441 | 0 |
Returns disk object from disks' collection __author__ = "ratamir" | def get_disk_obj(disk_alias, attribute='name'):
return DISKS_API.find(disk_alias, attribute=attribute) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def disk(self):\n return self.__disk",
"def getDisk(self, item):\n return self.disks[item]",
"def _getDisk(self):\n try:\n disk = self.parents[0]\n except IndexError:\n disk = None\n return disk",
"def GetDisk(self, disk_name: str) -> 'AZComputeDisk':\n disks = self.ListDisks()\n if disk_name not in disks:\n raise errors.ResourceNotFoundError(\n 'Disk {0:s} was not found in instance {1:s}'.format(\n disk_name, self.resource_id), __name__)\n return disks[disk_name]",
"def disk(self) -> HwDisk:\n return self._disk",
"def GetDisk(\n self,\n disk_name: str,\n resource_group_name: Optional[str] = None) -> 'AZComputeDisk':\n disks = self.ListDisks(resource_group_name=resource_group_name)\n if disk_name not in disks:\n raise errors.ResourceNotFoundError(\n 'Disk {0:s} was not found in subscription {1:s}'.format(\n disk_name, self.az_account.subscription_id), __name__)\n return disks[disk_name]",
"def get_disk_obj_from_disk_attachment(disk_attachment):\n return get_disk_obj(disk_attachment.get_id(), 'id')",
"def getObjDisks(name, get_href=True, is_template=False):\n response = get_disk_attachments(\n name, 'template' if is_template else 'vm', get_href\n )\n if get_href:\n return response\n return get_disk_list_from_disk_attachments(response)",
"def get_disks():\n disks = []\n\n try:\n # Run script\n result = run_diskpart(['list disk'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append disk numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Disk (\\d+)\\s+\\w+\\s+(\\d+\\s+\\w+)', output):\n num = tmp[0]\n size = human_readable_size(tmp[1])\n disks.append({'Number': num, 'Size': size})\n\n return disks",
"def get_disk_attachment(name, disk, attr='id', object_type='vm'):\n disk_list = get_disk_attachments(name, object_type=object_type)\n disk_id = None\n if attr == 'name' or attr == 'alias':\n for disk_obj in disk_list:\n disk_obj_alias = get_disk_obj(\n disk_obj.get_id(), attribute='id'\n ).get_alias()\n if disk_obj_alias == disk:\n disk_id = disk_obj.get_id()\n break\n elif attr == 'id':\n disk_id = disk\n\n for disk in disk_list:\n if disk.get_id() == disk_id:\n return disk\n return None",
"def disk(self, disk_id):\n try:\n return self._disks[disk_id]\n except KeyError:\n util.log_error(\n \"couldn't find disk {} on vm {}\".format(disk_id, self.id)\n )\n raise",
"def disk(self):\n return self._context.get(\"disk\", None)",
"def disk_get(context, disk_id):\n return NotImplemented",
"def GenericGetDiskInfo(self, uuid=None, name=None):\n if uuid:\n disk = self.cfg.GetDiskInfo(uuid)\n if disk is None:\n raise errors.OpPrereqError(\"No disk was found with this UUID: %s\" %\n uuid, errors.ECODE_INVAL)\n elif name:\n disk = self.cfg.GetDiskInfoByName(name)\n if disk is None:\n raise errors.OpPrereqError(\"No disk was found with this name: %s\" %\n name, errors.ECODE_INVAL)\n else:\n raise errors.ProgrammerError(\"No disk UUID or name was given\")\n\n return disk",
"def show_vdisk(client, resource_group_name, vm_name, disk_name):\n virtual_machine = client.get(resource_group_name, vm_name)\n for disk in virtual_machine.disks:\n if disk.virtual_disk_name == disk_name:\n return disk\n return None",
"def ParseDiskResource(resources, name, project, zone, type_):\n if type_ == compute_scopes.ScopeEnum.REGION:\n return resources.Parse(\n name,\n collection='compute.regionDisks',\n params={\n 'project': project,\n 'region': utils.ZoneNameToRegionName(zone)\n })\n else:\n return resources.Parse(\n name,\n collection='compute.disks',\n params={\n 'project': project,\n 'zone': zone\n })",
"def list_disks(self, instance_name):\n return ['A_DISK']",
"def get_ceph_disk():\n disks = []\n for srv in get_srv_list():\n cfg = get_srv_config(srv)\n for key in ['osd_data', 'osd_journal', 'mds_data', 'mon_data']:\n mnt_point = cfg[key]\n disk = get_disk_by_mountpoint(find_mount_point(mnt_point))\n if disk not in disks:\n disks.append(disk)\n return disks",
"def _load_disk(self):\r\n pass",
"def drives(self):\n if \"drives\" in self._prop_dict:\n return DrivesCollectionPage(self._prop_dict[\"drives\"])\n else:\n return None",
"async def get_disks(self, oid):\n pool = await self.query([('id', '=', oid)], {'get': True})\n if not pool['is_decrypted']:\n yield\n async for i in await self.middleware.call('zfs.pool.get_disks', pool['name']):\n yield i",
"def _load_disk(self):",
"def _load_disk(self):",
"def select_disk(title='Which disk?', disks=[]):\n # Build menu\n disk_options = []\n for disk in disks:\n display_name = '{}\\t[{}] ({}) {}'.format(\n disk.get('Size', ''),\n disk.get('Table', ''),\n disk.get('Type', ''),\n disk.get('Name', 'Unknown'),\n )\n pwidth=len(str(len(disk['Partitions'])))\n for partition in disk['Partitions']:\n # Main text\n p_name = 'Partition {num:>{width}}: {size} ({fs})'.format(\n num = partition['Number'],\n width = pwidth,\n size = partition['Size'],\n fs = partition['FileSystem'])\n if partition['Name']:\n p_name += '\\t\"{}\"'.format(partition['Name'])\n\n # Show unsupported partition(s)\n if is_bad_partition(partition):\n p_name = '{YELLOW}{p_name}{CLEAR}'.format(\n p_name=p_name, **COLORS)\n\n display_name += '\\n\\t\\t\\t{}'.format(p_name)\n if not disk['Partitions']:\n display_name += '\\n\\t\\t\\t{}No partitions found.{}'.format(\n COLORS['YELLOW'], COLORS['CLEAR'])\n\n disk_options.append({'Name': display_name, 'Disk': disk})\n actions = [\n {'Name': 'Main Menu', 'Letter': 'M'},\n ]\n\n # Menu loop\n selection = menu_select(\n title = title,\n main_entries = disk_options,\n action_entries = actions)\n\n if (selection.isnumeric()):\n return disk_options[int(selection)-1]['Disk']\n elif (selection == 'M'):\n raise GenericAbort",
"def get_all_disks():\n return DISKS_API.get(abs_link=False)",
"def get_disk(disk_name: Optional[str] = None,\n resource_group_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDiskResult:\n __args__ = dict()\n __args__['diskName'] = disk_name\n __args__['resourceGroupName'] = resource_group_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:compute/v20230402:getDisk', __args__, opts=opts, typ=GetDiskResult).value\n\n return AwaitableGetDiskResult(\n bursting_enabled=pulumi.get(__ret__, 'bursting_enabled'),\n bursting_enabled_time=pulumi.get(__ret__, 'bursting_enabled_time'),\n completion_percent=pulumi.get(__ret__, 'completion_percent'),\n creation_data=pulumi.get(__ret__, 'creation_data'),\n data_access_auth_mode=pulumi.get(__ret__, 'data_access_auth_mode'),\n disk_access_id=pulumi.get(__ret__, 'disk_access_id'),\n disk_iops_read_only=pulumi.get(__ret__, 'disk_iops_read_only'),\n disk_iops_read_write=pulumi.get(__ret__, 'disk_iops_read_write'),\n disk_m_bps_read_only=pulumi.get(__ret__, 'disk_m_bps_read_only'),\n disk_m_bps_read_write=pulumi.get(__ret__, 'disk_m_bps_read_write'),\n disk_size_bytes=pulumi.get(__ret__, 'disk_size_bytes'),\n disk_size_gb=pulumi.get(__ret__, 'disk_size_gb'),\n disk_state=pulumi.get(__ret__, 'disk_state'),\n encryption=pulumi.get(__ret__, 'encryption'),\n encryption_settings_collection=pulumi.get(__ret__, 'encryption_settings_collection'),\n extended_location=pulumi.get(__ret__, 'extended_location'),\n hyper_v_generation=pulumi.get(__ret__, 'hyper_v_generation'),\n id=pulumi.get(__ret__, 'id'),\n last_ownership_update_time=pulumi.get(__ret__, 'last_ownership_update_time'),\n location=pulumi.get(__ret__, 'location'),\n managed_by=pulumi.get(__ret__, 'managed_by'),\n managed_by_extended=pulumi.get(__ret__, 'managed_by_extended'),\n max_shares=pulumi.get(__ret__, 'max_shares'),\n name=pulumi.get(__ret__, 'name'),\n network_access_policy=pulumi.get(__ret__, 'network_access_policy'),\n optimized_for_frequent_attach=pulumi.get(__ret__, 'optimized_for_frequent_attach'),\n os_type=pulumi.get(__ret__, 'os_type'),\n property_updates_in_progress=pulumi.get(__ret__, 'property_updates_in_progress'),\n provisioning_state=pulumi.get(__ret__, 'provisioning_state'),\n public_network_access=pulumi.get(__ret__, 'public_network_access'),\n purchase_plan=pulumi.get(__ret__, 'purchase_plan'),\n security_profile=pulumi.get(__ret__, 'security_profile'),\n share_info=pulumi.get(__ret__, 'share_info'),\n sku=pulumi.get(__ret__, 'sku'),\n supported_capabilities=pulumi.get(__ret__, 'supported_capabilities'),\n supports_hibernation=pulumi.get(__ret__, 'supports_hibernation'),\n tags=pulumi.get(__ret__, 'tags'),\n tier=pulumi.get(__ret__, 'tier'),\n time_created=pulumi.get(__ret__, 'time_created'),\n type=pulumi.get(__ret__, 'type'),\n unique_id=pulumi.get(__ret__, 'unique_id'),\n zones=pulumi.get(__ret__, 'zones'))",
"def getGuestDisk(self, oSession, oTxsSession, eStorageController):\n lstDisks = None;\n\n # The naming scheme for NVMe is different and we don't have\n # to query the guest for unformatted disks here because the disk with the OS\n # is not attached to a NVMe controller.\n if eStorageController == vboxcon.StorageControllerType_NVMe:\n lstDisks = [ '/dev/nvme0n1' ];\n else:\n # Find a unformatted disk (no partition).\n # @todo: This is a hack because LIST and STAT are not yet implemented\n # in TXS (get to this eventually)\n lstBlkDev = [ '/dev/sda', '/dev/sdb' ];\n for sBlkDev in lstBlkDev:\n fRc = oTxsSession.syncExec('/usr/bin/ls', ('ls', sBlkDev + '1'));\n if not fRc:\n lstDisks = [ sBlkDev ];\n break;\n\n _ = oSession;\n return lstDisks;",
"def disks(self) -> List[CachingDisk]:\n return self._disk_fetcher.disks",
"def show_disks(self):\n icon = Icons.Icons() # Icon\n\n # For every disk, listing information\n icon.show_icon(\"disk\")\n for disk in DISKS:\n self.__get_info(disk)",
"def getVmDisk(vmName, alias=None, disk_id=None):\n value = None\n if disk_id:\n prop = \"id\"\n value = disk_id\n elif alias:\n prop = \"name\"\n value = alias\n else:\n logger.error(\"No disk identifier or name was provided\")\n return None\n return get_disk_obj_from_disk_attachment(\n get_disk_attachment(vmName, value, prop)\n )"
] | [
"0.7052868",
"0.682129",
"0.6661143",
"0.66152626",
"0.6550657",
"0.6482319",
"0.64641225",
"0.64555496",
"0.63855463",
"0.63251585",
"0.62917346",
"0.6245236",
"0.62283444",
"0.61439914",
"0.6135759",
"0.6087165",
"0.60854214",
"0.60216755",
"0.5937269",
"0.59311324",
"0.5924696",
"0.5905789",
"0.5905789",
"0.5902531",
"0.58868444",
"0.58770734",
"0.58270365",
"0.5820064",
"0.58134",
"0.5804216"
] | 0.6881587 | 1 |
Prepare or update disk object according to its kwargs __author__ = jlibosva | def _prepareDiskObject(**kwargs):
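    # Pop storage-domain, direct-LUN and storage-connection parameters out of kwargs
    # before the remaining kwargs are used to build or update the Disk object.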
storage_domain_name = kwargs.pop('storagedomain', None)
# Tuple (lun_address, lun_target, lun_id, lun_port)
lun = (kwargs.pop('lun_address', None), kwargs.pop('lun_target', None),
kwargs.pop('lun_id', None), kwargs.pop('lun_port', 3260))
# Tuple (username, password)
lun_creds = (kwargs.pop('lun_username', None),
kwargs.pop('lun_password', None))
type_ = kwargs.pop('type_', None)
storage_connection = kwargs.pop('storage_connection', None)
if lun != (None, None, None, 3260) and storage_connection:
logger.error(
"You cannot set storage connection id and LUN params in one call!")
return None
kwargs.pop('active', None)
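    # Reuse the Disk object passed via 'update' when present; otherwise build a new one from the remaining kwargs.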
disk = kwargs.pop('update', None)
if disk is None:
disk = data_st.Disk(**kwargs)
if storage_connection is not None:
storage = data_st.HostStorage()
storage.id = storage_connection
disk.set_lun_storage(storage)
if storage_domain_name is not None:
storage_domain = STORAGE_DOMAIN_API.find(storage_domain_name,
NAME_ATTR)
storage_domains = data_st.StorageDomains()
storage_domains.add_storage_domain(storage_domain)
disk.storage_domains = storage_domains
# quota
quota_id = kwargs.pop('quota', None)
if quota_id == '':
disk.set_quota(data_st.Quota())
elif quota_id:
disk.set_quota(data_st.Quota(id=quota_id))
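    # Build a direct-LUN storage definition when LUN address/target/id parameters were provided.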
if lun != (None, None, None, 3260):
direct_lun = data_st.LogicalUnit(address=lun[0], target=lun[1],
id=lun[2], port=lun[3])
if lun_creds != (None, None):
direct_lun.set_username(lun_creds[0])
direct_lun.set_password(lun_creds[1])
logical_units = data_st.LogicalUnits(logical_unit=[direct_lun])
disk.set_lun_storage(
data_st.HostStorage(logical_units=logical_units, type_=type_)
)
# id
disk_id = kwargs.pop('id', None)
if disk_id:
disk.set_id(disk_id)
# read_only
read_only = kwargs.pop('read_only', None)
if read_only is not None:
disk.set_read_only(read_only)
# snapshot
snapshot = kwargs.pop('snapshot', None)
if snapshot:
disk.set_snapshot(snapshot)
# description
description = kwargs.pop('description', None)
if description is not None:
disk.set_description(description)
# qcow_version
qcow_version = kwargs.pop('qcow_version', None)
if qcow_version:
disk.set_qcow_version(qcow_version)
return disk | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, _id, disks, description=None, tags=None, modifiedBy=None, createdBy=None, dateModified=None, dateCreated=None, domains=None):\n pass",
"def __init__(__self__, *,\n create_option: pulumi.Input[Union[str, 'DiskCreateOption']],\n gallery_image_reference: Optional[pulumi.Input['ImageDiskReferenceArgs']] = None,\n image_reference: Optional[pulumi.Input['ImageDiskReferenceArgs']] = None,\n logical_sector_size: Optional[pulumi.Input[int]] = None,\n performance_plus: Optional[pulumi.Input[bool]] = None,\n security_data_uri: Optional[pulumi.Input[str]] = None,\n source_resource_id: Optional[pulumi.Input[str]] = None,\n source_uri: Optional[pulumi.Input[str]] = None,\n storage_account_id: Optional[pulumi.Input[str]] = None,\n upload_size_bytes: Optional[pulumi.Input[float]] = None):\n pulumi.set(__self__, \"create_option\", create_option)\n if gallery_image_reference is not None:\n pulumi.set(__self__, \"gallery_image_reference\", gallery_image_reference)\n if image_reference is not None:\n pulumi.set(__self__, \"image_reference\", image_reference)\n if logical_sector_size is not None:\n pulumi.set(__self__, \"logical_sector_size\", logical_sector_size)\n if performance_plus is not None:\n pulumi.set(__self__, \"performance_plus\", performance_plus)\n if security_data_uri is not None:\n pulumi.set(__self__, \"security_data_uri\", security_data_uri)\n if source_resource_id is not None:\n pulumi.set(__self__, \"source_resource_id\", source_resource_id)\n if source_uri is not None:\n pulumi.set(__self__, \"source_uri\", source_uri)\n if storage_account_id is not None:\n pulumi.set(__self__, \"storage_account_id\", storage_account_id)\n if upload_size_bytes is not None:\n pulumi.set(__self__, \"upload_size_bytes\", upload_size_bytes)",
"def __init__(self):\n self.id = None\n self.typeInfo['id'] = 'string'\n \"\"\"the cache mode to use for this disk offering. none, writeback or writethrough\"\"\"\n self.cacheMode = None\n self.typeInfo['cacheMode'] = 'string'\n \"\"\"the date this disk offering was created\"\"\"\n self.created = None\n self.typeInfo['created'] = 'date'\n \"\"\"bytes read rate of the disk offering\"\"\"\n self.diskBytesReadRate = None\n self.typeInfo['diskBytesReadRate'] = 'long'\n \"\"\"bytes write rate of the disk offering\"\"\"\n self.diskBytesWriteRate = None\n self.typeInfo['diskBytesWriteRate'] = 'long'\n \"\"\"io requests read rate of the disk offering\"\"\"\n self.diskIopsReadRate = None\n self.typeInfo['diskIopsReadRate'] = 'long'\n \"\"\"io requests write rate of the disk offering\"\"\"\n self.diskIopsWriteRate = None\n self.typeInfo['diskIopsWriteRate'] = 'long'\n \"\"\"the size of the disk offering in GB\"\"\"\n self.disksize = None\n self.typeInfo['disksize'] = 'long'\n \"\"\"whether to display the offering to the end user or not.\"\"\"\n self.displayoffering = None\n self.typeInfo['displayoffering'] = 'boolean'\n \"\"\"an alternate display text of the disk offering.\"\"\"\n self.displaytext = None\n self.typeInfo['displaytext'] = 'string'\n \"\"\"the domain name this disk offering belongs to. Ignore this information as it is not currently applicable.\"\"\"\n self.domain = None\n self.typeInfo['domain'] = 'string'\n \"\"\"the domain ID this disk offering belongs to. Ignore this information as it is not currently applicable.\"\"\"\n self.domainid = None\n self.typeInfo['domainid'] = 'string'\n \"\"\"Hypervisor snapshot reserve space as a percent of a volume (for managed storage using Xen)\"\"\"\n self.hypervisorsnapshotreserve = None\n self.typeInfo['hypervisorsnapshotreserve'] = 'integer'\n \"\"\"true if disk offering uses custom size, false otherwise\"\"\"\n self.iscustomized = None\n self.typeInfo['iscustomized'] = 'boolean'\n \"\"\"true if disk offering uses custom iops, false otherwise\"\"\"\n self.iscustomizediops = None\n self.typeInfo['iscustomizediops'] = 'boolean'\n \"\"\"the max iops of the disk offering\"\"\"\n self.maxiops = None\n self.typeInfo['maxiops'] = 'long'\n \"\"\"the min iops of the disk offering\"\"\"\n self.miniops = None\n self.typeInfo['miniops'] = 'long'\n \"\"\"the name of the disk offering\"\"\"\n self.name = None\n self.typeInfo['name'] = 'string'\n \"\"\"provisioning type used to create volumes. Valid values are thin, sparse, fat.\"\"\"\n self.provisioningtype = None\n self.typeInfo['provisioningtype'] = 'string'\n \"\"\"the storage type for this disk offering\"\"\"\n self.storagetype = None\n self.typeInfo['storagetype'] = 'string'\n \"\"\"the tags for the disk offering\"\"\"\n self.tags = None\n self.typeInfo['tags'] = 'string'",
"def __init__(__self__,\n resource_name: str,\n args: VirtualHardDiskArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def __init__(self, *save_args, **save_kwargs):\n self.provides = save_kwargs.pop('provides', None)\n self.flag_update = save_kwargs.pop('flag_update', True)\n self.save_args = save_args\n self.save_kwargs = save_kwargs",
"def do_install_disk(cls, disk, disk_name, creator, workdir, oe_builddir,\n bootimg_dir, kernel_dir, native_sysroot):\n logger.debug(\"SourcePlugin: do_install_disk: disk: %s\", disk_name)",
"def prepare_install_instance(self):\n # These must be created and cached beforehand\n # TODO: Automate\n driver_locations = self.cache.retrieve_and_cache_object(\"driver-iso\", self, None, True)\n self.driver_iso_volume = driver_locations['cinder']\n iso_locations = self.cache.retrieve_and_cache_object(\"install-iso\",\n self, self.install_media_location, True)\n if self.env.is_floppy():\n self.iso_volume = iso_locations['cinder']\n self._prepare_floppy()\n self.log.debug (\"Prepared cinder iso (%s), driver_iso (%s) and\\\n floppy (%s) for install instance\" % (self.iso_volume,\n self.driver_iso_volume, self.floppy_volume)) \n else:\n self._respin_iso(iso_locations['local'], \"x86_64\")\n self.iso_volume_delete = True",
"def prepare_dev(\n data,\n journal,\n fstype,\n mkfs_args,\n mount_options,\n cluster_uuid,\n osd_uuid,\n journal_uuid,\n journal_dmcrypt,\n osd_dm_keypath,\n ):\n\n ptype_tobe = TOBE_UUID\n ptype_osd = OSD_UUID\n if osd_dm_keypath:\n ptype_tobe = DMCRYPT_TOBE_UUID\n ptype_osd = DMCRYPT_OSD_UUID\n\n rawdev = None\n if is_partition(data):\n LOG.debug('OSD data device %s is a partition', data)\n rawdev = data\n else:\n LOG.debug('Creating osd partition on %s', data)\n try:\n command_check_call(\n [\n 'sgdisk',\n '--largest-new=1',\n '--change-name=1:ceph data',\n '--partition-guid=1:{osd_uuid}'.format(\n osd_uuid=osd_uuid,\n ),\n '--typecode=1:%s' % ptype_tobe,\n '--',\n data,\n ],\n )\n update_partition('-a', data, 'created')\n command(\n [\n # wait for udev event queue to clear\n 'udevadm',\n 'settle',\n ],\n )\n except subprocess.CalledProcessError as e:\n raise Error(e)\n\n rawdev = get_partition_dev(data, 1)\n\n dev = None\n if osd_dm_keypath:\n dev = dmcrypt_map(rawdev, osd_dm_keypath, osd_uuid)\n else:\n dev = rawdev\n\n try:\n args = [\n 'mkfs',\n '-t',\n fstype,\n ]\n if mkfs_args is not None:\n args.extend(mkfs_args.split())\n if fstype == 'xfs':\n args.extend(['-f']) # always force\n else:\n args.extend(MKFS_ARGS.get(fstype, []))\n args.extend([\n '--',\n dev,\n ])\n try:\n LOG.debug('Creating %s fs on %s', fstype, dev)\n command_check_call(args)\n except subprocess.CalledProcessError as e:\n raise Error(e)\n\n #remove whitespaces from mount_options\n if mount_options is not None:\n mount_options = \"\".join(mount_options.split())\n\n path = mount(dev=dev, fstype=fstype, options=mount_options)\n\n try:\n prepare_dir(\n path=path,\n journal=journal,\n cluster_uuid=cluster_uuid,\n osd_uuid=osd_uuid,\n journal_uuid=journal_uuid,\n journal_dmcrypt=journal_dmcrypt,\n )\n finally:\n unmount(path)\n finally:\n if rawdev != dev:\n dmcrypt_unmap(osd_uuid)\n\n if not is_partition(data):\n try:\n command_check_call(\n [\n 'sgdisk',\n '--typecode=1:%s' % ptype_osd,\n '--',\n data,\n ],\n )\n except subprocess.CalledProcessError as e:\n raise Error(e)",
"def saveInitialBook(email):\n\n bookname = hashlib.sha512((email + \"cow\").encode(\"utf-8\")).hexdigest()\n copy(\"/var/www/books/intro.obj\", \"/var/www/books/\"+bookname+\".obj\")",
"def __init__(self, updater_name, repository_mirrors):\n \n # Do the arguments have the correct format?\n # These checks ensure the arguments have the appropriate\n # number of objects and object types and that all dict\n # keys are properly named.\n # Raise 'tuf.FormatError' if there is a mistmatch.\n tuf.formats.NAME_SCHEMA.check_match(updater_name)\n tuf.formats.MIRRORDICT_SCHEMA.check_match(repository_mirrors)\n \n # Save the validated arguments.\n self.name = updater_name\n self.mirrors = repository_mirrors\n\n # Store the trusted metadata read from disk.\n self.metadata = {}\n \n # Store the currently trusted/verified metadata.\n self.metadata['current'] = {} \n \n # Store the previously trusted/verified metadata.\n self.metadata['previous'] = {}\n\n # Store the file information of all the metadata files. The dict keys are\n # paths, the dict values fileinfo data. This information can help determine\n # whether a metadata file has changed and so needs to be re-downloaded.\n self.fileinfo = {}\n \n # Store the location of the client's metadata directory.\n self.metadata_directory = {}\n \n # Ensure the repository metadata directory has been set.\n if tuf.conf.repository_directory is None:\n message = 'The TUF update client module must specify the directory' \\\n ' containing the local repository files.' \\\n ' \"tuf.conf.repository_directory\" MUST be set.'\n raise tuf.RepositoryError(message)\n\n # Set the path for the current set of metadata files. \n repository_directory = tuf.conf.repository_directory\n current_path = os.path.join(repository_directory, 'metadata', 'current')\n \n # Ensure the current path is valid/exists before saving it.\n if not os.path.exists(current_path):\n message = 'Missing '+repr(current_path)+'. This path must exist and, ' \\\n 'at a minimum, contain the root metadata file.' \n raise tuf.RepositoryError(message)\n self.metadata_directory['current'] = current_path\n \n # Set the path for the previous set of metadata files. \n previous_path = os.path.join(repository_directory, 'metadata', 'previous') \n \n # Ensure the previous path is valid/exists.\n if not os.path.exists(previous_path):\n message = 'Missing '+repr(previous_path)+'. This path must exist.'\n raise tuf.RepositoryError(message)\n self.metadata_directory['previous'] = previous_path\n \n # Load current and previous metadata.\n for metadata_set in ['current', 'previous']:\n for metadata_role in ['root', 'targets', 'release', 'timestamp']:\n self._load_metadata_from_file(metadata_set, metadata_role)\n \n # Raise an exception if the repository is missing the required 'root'\n # metadata.\n if 'root' not in self.metadata['current']:\n message = 'No root of trust! Could not find the \"root.txt\" file.'\n raise tuf.RepositoryError(message)",
"def __init__(self, conn, name, alias, **kwargs):\n super().__init__()\n\n self._alias = alias\n self._name = name\n self._properties = kwargs\n\n lv_pool = conn.storagePoolLookupByName(self.pool)\n lv_pool.createXML(\n self._volume_xml(),\n libvirt.VIR_STORAGE_VOL_CREATE_PREALLOC_METADATA\n )\n lv_volume = lv_pool.storageVolLookupByName(self.name)\n self._path = lv_volume.path()\n\n self._xml = ElementTree.Element('disk')\n self._xml.attrib['type'] = 'file'\n self._xml.attrib['device'] = 'disk'\n driver_element = ElementTree.Element('driver')\n driver_element.attrib['name'] = 'qemu'\n driver_element.attrib['type'] = 'qcow2'\n self._xml.append(driver_element)\n target_element = ElementTree.Element('target')\n target_element.attrib['dev'] = self.target\n target_element.attrib['bus'] = 'virtio'\n self._xml.append(target_element)\n source_element = ElementTree.Element('source')\n source_element.attrib['file'] = self.path\n self._xml.append(source_element)\n alias_element = ElementTree.Element('alias')\n alias_element.attrib['name'] = 'virtio-%s' % self.alias\n self._xml.append(alias_element)\n\n LOG.debug(\"Define virtual disk %s (%s bytes)\", self.name, self.capacity)",
"def __init__(self, debug=False, output_writer=None):\n super(RecycleBinMetadataFile, self).__init__(\n debug=debug, output_writer=output_writer)\n self.deletion_time = None\n self.format_version = None\n self.original_filename = None\n self.original_file_size = None",
"def test_create_drives_drive_firmware_update_item(self):\n pass",
"def __init__(self):\n \n self.id = \"\"\n self.uploaded_by = \"\"\n self.description = \"\"\n self.file_size = \"\"\n self.version = \"\"\n self.uploaded_date = \"\"\n self.uploaded_date_long = 0",
"def _load_disk(self):",
"def _load_disk(self):",
"def __init__(self, attck_obj = None, **kwargs):\n\n self.attck_obj = attck_obj\n\n self.id = super(AttckTools, self)._set_id(kwargs)\n self.name = super(AttckTools, self)._set_attribute(kwargs, 'name')\n self.alias = super(AttckTools, self)._set_attribute(kwargs, 'aliases')\n self.description = super(AttckTools, self)._set_attribute(kwargs, 'description')\n self.reference = super(AttckTools, self)._set_reference(kwargs)\n self.created = super(AttckTools, self)._set_attribute(kwargs, 'created')\n self.modified = super(AttckTools, self)._set_attribute(kwargs, 'modified')\n self.stix = super(AttckTools, self)._set_attribute(kwargs, 'id')\n self.type = super(AttckTools, self)._set_attribute(kwargs, 'type')\n self.wiki = super(AttckTools, self)._set_wiki(kwargs)\n self.contributor = super(AttckTools, self)._set_attribute(kwargs, 'contributor')",
"def __init__(self, infer_from_volume=None, kind=None, name=None, revision_name=None):\n\n self._infer_from_volume = None\n self._kind = None\n self._name = None\n self._revision_name = None\n\n if infer_from_volume is not None:\n self.infer_from_volume = infer_from_volume\n if kind is not None:\n self.kind = kind\n if name is not None:\n self.name = name\n if revision_name is not None:\n self.revision_name = revision_name",
"def __init__(self, name, bucket_id, created_by):\n self.name = name\n self.bucket_id = bucket_id\n self.created_by = created_by",
"def __init__(self, *args):\n self.client = None\n FileRepository.__init__(self, *args)",
"def __init__(self, title, filename, path, author='', pages=0,\n tags=[], filetype='pdf', bid=None):\n self.title = title\n self.filename = filename\n self.path = path\n self.author = author\n self.pages = pages\n self.tags = tags\n self.filetype = filetype\n self.bid = bid",
"def do_prepare_partition(cls, part, source_params, creator, cr_workdir,\n oe_builddir, bootimg_dir, kernel_dir, rootfs_dir,\n native_sysroot):\n logger.debug(\"SourcePlugin: do_prepare_partition: part: %s\", part)",
"def _load_disk(self):\r\n pass",
"def __init__(self):\n self.id = 0\n self.file_name = \"\"\n self.content_type = \"\"\n self.versions = []\n self.folder = Folder()\n self.url = \"\" \n\n self.upload_doc = []\n self.description = \"\" \n self.tags = \"\" \n self.notify = 0",
"def update_object(self, name: str) -> None:",
"def __init__(__self__, *,\n api_version: Optional[pulumi.Input[str]] = None,\n block_owner_deletion: Optional[pulumi.Input[bool]] = None,\n controller: Optional[pulumi.Input[bool]] = None,\n kind: Optional[pulumi.Input[str]] = None,\n name: Optional[pulumi.Input[str]] = None,\n uid: Optional[pulumi.Input[str]] = None):\n if api_version is not None:\n pulumi.set(__self__, \"api_version\", api_version)\n if block_owner_deletion is not None:\n pulumi.set(__self__, \"block_owner_deletion\", block_owner_deletion)\n if controller is not None:\n pulumi.set(__self__, \"controller\", controller)\n if kind is not None:\n pulumi.set(__self__, \"kind\", kind)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if uid is not None:\n pulumi.set(__self__, \"uid\", uid)",
"def __init__(self,guid='',name=''):\n extent.Extent.__init__(self,guid,name,'Physical LUN/Disk')\n self.exttype='physical'\n self.alias=''\n self.alwaysadd=True\n self.direct=True\n self.serial='' # SCSI Serial ID\n self.model='' # SCSI Model\n self.vendor='' # SCSI Vendor\n self.revision='' # SCSI revision\n self.raid=RaidLevel.none # underline raid '0' '1' '5' '6' '10'\n self.sectorsize=512\n self.paths=VSACollection(san.SanPath,self,'paths',desc='Paths To Disk',icon='link_g.png') # list of paths leading to this physical disk (local or remote), key = dev/hbtl\n self.partitions=VSACollection(SanPartition,self,'partitions',icon='vm_g.png') # list of paths leading to this physical disk (local or remote), key = dev/hbtl\n self.usedin=RefDict(extent.Extent,self,'usedin',desc='Owned by Storage Extents',icon='hard_disk.png',call=lambda self:[v for v in self.usedby if v.exttype<>'partition'])\n self.assigned=[] # list of chunks assigned from this disk\n self.primordial=1 # primary extent, based on physical HW (e.g. not a logical volume)\n self.cachepvds=[] # list of provider names which have a matching cache logical volume (found in discovery, process extents)\n self.cachedrdev=None # points to cache DRBD device\n self._flush()",
"def preCommitFixup(self):\n log_method_call(self, self.name)\n if not self.exists or not self.disklabelSupported:\n return\n\n # find the correct partition on the original parted.Disk since the\n # name/number we're now using may no longer match\n _disklabel = self.disk.originalFormat\n\n if self.isExtended:\n # getPartitionBySector doesn't work on extended partitions\n _partition = _disklabel.extendedPartition\n log.debug(\"extended lookup found partition %s\",\n devicePathToName(getattr(_partition, \"path\", None) or \"(none)\"))\n else:\n # lookup the partition by sector to avoid the renumbering\n # nonsense entirely\n _sector = self.partedPartition.geometry.start\n _partition = _disklabel.partedDisk.getPartitionBySector(_sector)\n log.debug(\"sector-based lookup found partition %s\",\n devicePathToName(getattr(_partition, \"path\", None) or \"(none)\"))\n\n self.partedPartition = _partition",
"def createDisk(self , name):\n return",
"def disk(self, disk):\n self._context[\"disk\"] = disk"
] | [
"0.57172906",
"0.56479627",
"0.549607",
"0.5475961",
"0.54032505",
"0.539175",
"0.5331734",
"0.53218746",
"0.52937764",
"0.52769303",
"0.5270235",
"0.52216846",
"0.5219496",
"0.51791817",
"0.51659566",
"0.51659566",
"0.5165272",
"0.51422894",
"0.51333195",
"0.5112877",
"0.509336",
"0.508583",
"0.50844187",
"0.5083162",
"0.50813127",
"0.5055531",
"0.5053123",
"0.5047673",
"0.5043176",
"0.50393564"
] | 0.66322654 | 0 |
Detach disk from VM | def detachDisk(positive, alias, vmName):
logger.info("Detaching disk %s from vm %s", alias, vmName)
disk_attachment = get_disk_attachment(vmName, alias, attr='name')
return DISK_ATTACHMENTS_API.delete(disk_attachment, positive) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def disk_detach(vmdk_path, vm):\n\n device = findDeviceByPath(vmdk_path, vm)\n\n if not device:\n # Could happen if the disk attached to a different VM - attach fails\n # and docker will insist to sending \"unmount/detach\" which also fails.\n msg = \"*** Detach failed: disk={0} not found. VM={1}\".format(\n vmdk_path, vm.config.uuid)\n logging.warning(msg)\n return err(msg)\n\n spec = vim.vm.ConfigSpec()\n dev_changes = []\n\n disk_spec = vim.vm.device.VirtualDeviceSpec()\n disk_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove\n disk_spec.device = device\n dev_changes.append(disk_spec)\n spec.deviceChange = dev_changes\n\n try:\n wait_for_tasks(si, [vm.ReconfigVM_Task(spec=spec)])\n except vim.fault.GenericVmConfigFault as ex:\n for f in ex.faultMessage:\n logging.warning(f.message)\n return err(\"Failed to detach \" + vmdk_path)\n\n setStatusDetached(vmdk_path)\n logging.info(\"Disk detached %s\", vmdk_path)\n return None",
"def detachDiskFromMinipad(self , disk):\n return",
"def _DetachDisk(self, idx, root, _):\n hotmsg = \"\"\n if self.op.hotplug:\n hotmsg = self._HotplugDevice(constants.HOTPLUG_ACTION_REMOVE,\n constants.HOTPLUG_TARGET_DISK,\n root, None, idx)\n\n # Always shutdown the disk before detaching.\n ShutdownInstanceDisks(self, self.instance, [root])\n\n # Rename detached disk.\n #\n # Transform logical_id from:\n # <file_storage_dir>/<instance_name>/<disk_name>\n # to\n # <file_storage_dir>/<disk_name>\n if root.dev_type in (constants.DT_FILE, constants.DT_SHARED_FILE):\n file_driver = root.logical_id[0]\n instance_path, disk_name = os.path.split(root.logical_id[1])\n new_path = os.path.join(os.path.dirname(instance_path), disk_name)\n new_logical_id = (file_driver, new_path)\n result = self.rpc.call_blockdev_rename(self.instance.primary_node,\n [(root, new_logical_id)])\n result.Raise(\"Failed before detach\")\n # Update logical_id\n self.cfg.SetDiskLogicalID(root.uuid, new_logical_id)\n\n # Remove disk from config\n self.cfg.DetachInstanceDisk(self.instance.uuid, root.uuid)\n\n # re-read the instance from the configuration\n self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)\n\n return hotmsg",
"def detach_volume(self, host_path: str):\n del self.volumes[host_path]",
"def detach_volume(self, connection_info, instance, mountpoint,\n encryption=None):",
"def detach_disk_from_vm(self, vm_ref, instance_name, device):\n client_factory = self._session._get_vim().client.factory\n vmdk_detach_config_spec = vm_util.get_vmdk_detach_config_spec(\n client_factory, device)\n disk_key = device.key\n LOG.debug(_(\"Reconfiguring VM instance %(instance_name)s to detach \"\n \"disk %(disk_key)s\") % locals())\n reconfig_task = self._session._call_method(\n self._session._get_vim(),\n \"ReconfigVM_Task\", vm_ref,\n spec=vmdk_detach_config_spec)\n self._session._wait_for_task(instance_name, reconfig_task)\n LOG.debug(_(\"Reconfigured VM instance %(instance_name)s to detach \"\n \"disk %(disk_key)s\") % locals())",
"def detach_pd(self, conn, host, pd):\n zone = self.get_zone(conn, host)\n pdhost = self.get_pd_host(conn, pd, zone)\n if pdhost == \"\":\n self.tracer.info(\n \"disk %s is already attached to %s(%s)\" % (pd, host, zone))\n elif pdhost == host:\n self.tracer.info(\"attempting to detach %s from %s(%s)\" % (pd, host, zone))\n operation = conn.instances().detachDisk(project=PROJECT, zone=zone, instance=host, deviceName=pd).execute()\n self.wait_for_operation(conn, operation, zone)\n if self.get_pd_host(conn, pd, zone) == \"\":\n self.tracer.info(\"successfully detached %s from %s(%s)\" % (pd, host, zone))",
"def detach_volume(self):\n\n # Choose the volume\n volume_id = self._choose_among_used_volumes()\n\n # Cancel\n if not volume_id:\n print 'Operation cancelled'\n return\n\n # Detach the volume\n print '# Detaching volume \"%s\"!' % volume_id\n if self.compute.detach_volume(volume_id):\n print 'The volume has been detached!'\n else:\n print 'The volume could not been detached'",
"def detach(self, name):\n volume_info = self.cm.find_name(name)\n if volume_info and volume_info[0]['State'] != \"deleted\":\n vms = volume_info[0]['AttachedToVm']\n path = volume_info[0]['path']\n if len(vms) == 0:\n Console.error(f\"{name} is not attached to any vm\")\n else:\n removed = []\n for vm in vms:\n result = self.unmount(path=f\"{path}/{name}\", vm=vm)\n mounts = result['mounts']\n if f\"{path}/{name}\" not in mounts.keys():\n removed.append(vm)\n for vm in removed:\n vms.remove(vm)\n result = self.update_volume_after_detach(volume_info, vms)\n return result[0]\n else:\n Console.error(\"volume does not exist or volume had been deleted\")",
"def detach_volume(self, connection_info, instance, mountpoint,\n encryption=None):\n vhd_name = connection_info['data']['disk_name']\n vm = self._get_instance(instance.uuid)\n data_disks = vm.storage_profile.data_disks\n not_found = True\n for i in range(len(data_disks)):\n if vhd_name == data_disks[i].name:\n del data_disks[i]\n not_found = False\n break\n if not_found:\n LOG.info(_LI('Volume: %s was not attached to Instance!'),\n vhd_name, instance=instance)\n return\n self._create_update_instance(instance, vm)\n LOG.info(_LI(\"Detach Volume to Instance in Azure finish\"),\n instance=instance)",
"def detach(self):\r\n\r\n return self.driver.detach_volume(volume=self)",
"def detach(self):\n raise io.UnsupportedOperation",
"def delete(vmname, deldisk=True):\n\n dom = _conn.lookupByName(vmname)\n if dom.isActive():\n dom.destroy()\n infokeeper.update_status_vm(vmname, Instance.STATUS_POWER_OFF)\n dom.undefine()\n infokeeper.delete_vm(vmname)\n if deldisk:\n os.remove(os.path.join(base_disk_path, dom.name() + '.img'))\n return 'VM %s deleted' % vmname",
"def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):\n raise NotImplementedError()",
"def vm_diskdelete(args):\n name = args.name\n diskname = args.diskname\n pool = args.pool\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n if diskname is None:\n common.pprint(\"Missing diskname. Leaving...\", color='red')\n os._exit(1)\n common.pprint(\"Deleting disk %s\" % diskname)\n k.delete_disk(name=name, diskname=diskname, pool=pool)\n return",
"def detach_volume(self, instance_name, mountpoint):\n return True",
"def detach_volume(self, context, volume_id):\n # TODO(vish): refactor this into a more general \"unreserve\"\n # TODO(sleepsonthefloor): Is this 'elevated' appropriate?\n # self.db.volume_detached(context.elevated(), volume_id)\n self.db.volume_admin_metadata_delete(context.elevated(), volume_id,\n 'attached_mode')",
"def detach_volume(self, connection_info, instance, mountpoint):\n instance_name = instance['name']\n vm_ref = vm_util.get_vm_ref_from_name(self._session, instance_name)\n if vm_ref is None:\n raise exception.InstanceNotFound(instance_id=instance_name)\n # Detach Volume from VM\n LOG.debug(_(\"Detach_volume: %(instance_name)s, %(mountpoint)s\")\n % locals())\n driver_type = connection_info['driver_volume_type']\n if driver_type not in ['iscsi']:\n raise exception.VolumeDriverNotFound(driver_type=driver_type)\n data = connection_info['data']\n\n # Discover iSCSI Target\n device_name, uuid = volume_util.find_st(self._session, data,\n self._cluster)\n if device_name is None:\n raise volume_util.StorageError(_(\"Unable to find iSCSI Target\"))\n\n # Get the vmdk file name that the VM is pointing to\n hardware_devices = self._session._call_method(vim_util,\n \"get_dynamic_property\", vm_ref,\n \"VirtualMachine\", \"config.hardware.device\")\n device = vm_util.get_rdm_disk(hardware_devices, uuid)\n if device is None:\n raise volume_util.StorageError(_(\"Unable to find volume\"))\n self.detach_disk_from_vm(vm_ref, instance_name, device)\n LOG.info(_(\"Mountpoint %(mountpoint)s detached from \"\n \"instance %(instance_name)s\") % locals())",
"def delete_disk(self, disk, delete_vmdk=True):\n backend_disk = self.get_backend_disk(disk)\n\n try:\n self.client.delete_disk(disk.vm.backend_id, disk.backend_id)\n except VMwareError as e:\n raise VMwareBackendError(e)\n\n if delete_vmdk:\n vdm = self.soap_client.content.virtualDiskManager\n task = vdm.DeleteVirtualDisk(\n name=backend_disk.backing.fileName,\n datacenter=self.get_disk_datacenter(backend_disk),\n )\n try:\n pyVim.task.WaitForTask(task)\n except Exception:\n logger.exception('Unable to delete VMware disk. Disk ID: %s.', disk.id)\n raise VMwareBackendError('Unknown error.')\n signals.vm_updated.send(self.__class__, vm=disk.vm)",
"def detach(self, storages):\n self.tracer.info(\"%s.attach method called\" % self.__class__.__name__)\n\n # init variables & arrays\n all_pds = []\n all_vgs = []\n unmount_err = 0\n\n # reload global.ini\n self._cfg.reload()\n\n # connect to Google API\n conn = self.api_conn()\n\n # fetch the GCE zone for this host\n zone = self.get_zone(conn, HOSTNAME)\n\n for storage in storages:\n # fetch pd & dev variables for specified partition & usage\n connectionData = self._getConnectionDataForLun(storage.get(\"partition\"), storage.get(\"usage_type\"))\n try:\n pd = connectionData[\"pd\"]\n dev = connectionData[\"dev\"]\n except:\n raise Exception(\"pd or dev not set in global.ini\")\n\n # fetch the host which currently owns the disk & the file path\n path = storage.get(\"path\")\n\n # try to unmount the file system twice\n self._forcedUnmount(dev, path, 2)\n\n # if it's still mounted, try killing blocking processes and umount again\n if os.path.ismount(path):\n self._lsof_and_kill(path)\n self._forcedUnmount(dev, path, 2)\n\n # if still mounted, raise exception. The taking over node will stonith this host\n if os.path.ismount(path):\n self.tracer.warning(\"A PID belonging to someone other than SIDADM is blocking the unmount. This node will be fenced\")\n self._umount(path, lazy=True)\n mount_err = 1\n\n # add to list of devices.\n all_pds.append(pd)\n\n # check to see if the device is a VG. If so, add it to the list of VG's\n all_vgs.append(self.get_vg(dev))\n\n # Stop each unique VG\n all_vgs = list(set(all_vgs))\n for vg in all_vgs:\n Helper._runOsCommand(\"sudo /sbin/vgchange -an %s\" % vg, self.tracer)\n self.tracer.info(\"stopping volume group %s\" % (vg))\n\n # for each unique disk detected, detach it using Google API's\n all_pds = list(set(all_pds))\n for pd_member in all_pds:\n self.detach_pd(conn, HOSTNAME, pd_member)\n\n # if there was an error unmounting, self fence\n if unmount_err == 1:\n self.fence(conn, pdhost)\n\n # tell HANA we successfully detached\n return 0",
"def _detach_volume(self, server, volume):\n try:\n volume = self.volumes_client.show_volume(volume['id'])['volume']\n # Check the status. You can only detach an in-use volume, otherwise\n # the compute API will return a 400 response.\n if volume['status'] == 'in-use':\n self.servers_client.detach_volume(server['id'], volume['id'])\n except lib_exc.NotFound:\n # Ignore 404s on detach in case the server is deleted or the volume\n # is already detached.\n pass",
"def _detach_volume(self, server, volume):\n try:\n volume = self.volumes_client.show_volume(volume['id'])['volume']\n # Check the status. You can only detach an in-use volume, otherwise\n # the compute API will return a 400 response.\n if volume['status'] == 'in-use':\n self.servers_client.detach_volume(server['id'], volume['id'])\n except lib_exc.NotFound:\n # Ignore 404s on detach in case the server is deleted or the volume\n # is already detached.\n pass",
"def detach(self, force=False):\r\n instance_id = None\r\n if self.attach_data:\r\n instance_id = self.attach_data.instance_id\r\n device = None\r\n if self.attach_data:\r\n device = self.attach_data.device\r\n return self.connection.detach_volume(self.id, instance_id, device, force)",
"def detach(args, **_):\n\n volume_id = \\\n utils.get_external_resource_id_or_raise(\n 'detach volume', ctx.source.instance)\n instance_id = \\\n utils.get_external_resource_id_or_raise(\n 'detach volume', ctx.target.instance)\n\n if _detach_external_volume_or_instance():\n return\n\n ctx.logger.debug('Detaching EBS volume {0}'.format(volume_id))\n\n volume_object = _get_volumes_from_id(volume_id)\n\n if not volume_object:\n raise NonRecoverableError(\n 'EBS volume {0} not found in account.'.format(volume_id))\n\n try:\n detached = volume_object.detach(**args)\n except (boto.exception.EC2ResponseError,\n boto.exception.BotoServerError) as e:\n raise NonRecoverableError('{0}'.format(str(e)))\n\n if not detached:\n raise NonRecoverableError(\n 'Failed to detach volume {0} from instance {1}'\n .format(volume_id, instance_id))\n\n utils.unassign_runtime_property_from_resource(\n 'instance_id', ctx.source.instance)\n ctx.logger.info(\n 'Detached volume {0} from instance {1}.'\n .format(volume_id, instance_id))",
"def detach_volume(self,\n connection_info,\n instance,\n mountpoint,\n encryption=None):\n volume_data = connection_info['data']\n azure_name = self._get_omni_name_from_instance(instance)\n azure_instance = utils.get_instance(\n self.compute_client, drv_conf.resource_group, azure_name)\n data_disks = azure_instance.storage_profile.data_disks\n name = volume_data['name']\n filtered_disks = [disk for disk in data_disks if disk.name != name]\n if len(filtered_disks) == len(data_disks):\n LOG.error(\"Volume %s was not attached to instance %s\" %\n (name, instance.uuid))\n return\n azure_instance.storage_profile.data_disks = filtered_disks\n utils.create_or_update_instance(self.compute_client,\n drv_conf.resource_group, azure_name,\n azure_instance)\n LOG.info(\"Detached volume %s from instance %s\" % (name, instance.uuid))",
"def disconnect_disk(self, instance, stg_ftsk=None, disk_type=None):\n raise NotImplementedError()",
"async def eject(self) -> None:\n await self.dbus.Drive.call_eject(UDISKS2_DEFAULT_OPTIONS)",
"def remove_vdisk_from_svc(svc, vdisk):\r\n svc_ssh = openSSH(svc, getpass.getuser())\r\n ## First we need to unmap from the host\r\n print \"Removing the mapping between %s on %s...\" % (vdisk[\"name\"],\r\n vdisk[\"hostlist\"][0])\r\n command = \"rmvdiskhostmap -host %s %s\" % (vdisk[\"hostlist\"][0],\r\n vdisk[\"name\"])\r\n print command\r\n output = svc_ssh.exec_command(command)[1].readlines()\r\n for line in output:\r\n print line.strip()\r\n ## Remove the volume\r\n print \"Removing the vdisk %s...\" % vdisk[\"name\"]\r\n command = \"rmvdisk %s\" % vdisk[\"name\"]\r\n print command\r\n output = svc_ssh.exec_command(command)[1].readlines()\r\n for line in output:\r\n print line.strip()\r\n svc_ssh.close()\r\n ## End remove_vdisk_from_svc\r",
"def peer_detach(self):\n cmd = \"gluster peer detach %s\"%(self.server)\n if self.force is True:\n cmd = cmd + ' force'\n cmdlist = shlex.split(cmd)\n output = subprocess.Popen(cmdlist, stdout = subprocess.PIPE,\n stderr = subprocess.PIPE)\n # TODO: Do more extensive error check\n stdout = output.stdout.read()\n stderr = output.stderr.read()\n print json.dumps({\n \"Server detached\": self.server,\n \"Status\": stdout\n })",
"def remove():\n vbox = Vbox(env.vm_name)\n vbox.remove()"
] | [
"0.77460337",
"0.7383604",
"0.7272938",
"0.70957154",
"0.7014271",
"0.69647926",
"0.6918265",
"0.6742025",
"0.67416346",
"0.6706193",
"0.66873896",
"0.6683356",
"0.66481096",
"0.6553151",
"0.65065736",
"0.6481779",
"0.64740014",
"0.6471214",
"0.64225125",
"0.64092827",
"0.63875246",
"0.63875246",
"0.62964183",
"0.62828",
"0.62000644",
"0.6181245",
"0.60987514",
"0.60890454",
"0.60563064",
"0.59976333"
] | 0.78766763 | 0 |
Get all disks interfaces/formats/allocation policies permutations possible | def get_all_disk_permutation(
block=True, shared=False, interfaces=(VIRTIO, VIRTIO_SCSI)
):
permutations = []
for disk_format in [FORMAT_COW, FORMAT_RAW]:
for interface in interfaces:
for sparse in [True, False]:
if disk_format is FORMAT_RAW and sparse and block:
continue
if disk_format is FORMAT_COW and not sparse:
continue
if shared and disk_format is FORMAT_COW:
continue
if not sparse and not block:
continue
permutation = {'format': disk_format,
'interface': interface,
'sparse': sparse,
}
permutations.append(permutation)
return permutations | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _LookupDiskMods(self):\n return [(op, self._LookupDiskIndex(idx), params)\n for op, idx, params in self.op.disks]",
"def get_all_disk():\n\t\tdisks = []\n\t\tdisks_lines = linux.exe_shell(\"lsblk -o NAME,VENDOR|grep -P '^sd.*[A-Z]'\")\n\t\tfor line in disks_lines.splitlines():\n\t\t\tdisk_t = line.split()\n\t\t\tif len(disk_t) > 1 and \"LSI\" not in disk_t[1]:\n\t\t\t\tdisks.append(disk_t[0])\n\t\tds = []\n\t\tfor i in disks:\n\t\t\td_t = DiskFromLsiSas3(\"\", i)\n\t\t\td_t.fill_attrs()\n\t\t\tds.append(d_t)\n\t\treturn ds",
"def get_ordered_partitions(disks):\n parts = []\n for disk in disks:\n parts += disk.partitions\n parts.sort(lambda x,y: len(x.mntpnt or '')-len(y.mntpnt or ''))\n return parts",
"def test_get_hyperflex_ext_iscsi_storage_policy_list(self):\n pass",
"def get_partitions(adapter, lpars=True, vioses=True, mgmt=False):\n rets = []\n if vioses:\n rets.extend(vios.VIOS.get(adapter))\n if lpars:\n rets.extend(lpar.LPAR.get(adapter))\n\n # If they need the mgmt lpar, get it. But ONLY if we didn't get both\n # VIOSes and LPARs. If we got both of those already, then we are\n # guaranteed to already have the mgmt lpar in there.\n if mgmt and not (lpars and vioses):\n mgmt_w = get_mgmt_partition(adapter)\n if mgmt_w.uuid not in [x.uuid for x in rets]:\n rets.append(get_mgmt_partition(adapter))\n\n return rets",
"def scan_disks():\n disks = get_disks()\n\n # Get disk details\n for disk in disks:\n # Get partition style\n disk['Table'] = get_table_type(disk)\n\n # Get disk name/model and physical details\n disk.update(get_disk_details(disk))\n\n # Get partition info for disk\n disk['Partitions'] = get_partitions(disk)\n\n for partition in disk['Partitions']:\n # Get partition details\n partition.update(get_partition_details(disk, partition))\n\n # Done\n return disks",
"def test_get_hyperflex_ext_fc_storage_policy_list(self):\n pass",
"def _get_partition_list(self):\n raise NotImplementedError('Must be implemented in subclasses.')",
"def test_get_hyperflex_cluster_storage_policy_list(self):\n pass",
"def storage_policies(self, **kwargs):\n self.logger.debug(f\"Get storage policies data\")\n url_path = 'storage/policies'\n body = self._make_body(kwargs)\n return self._common_get(request_path=url_path, parameters=body)",
"def consolidated_risks(self):\n privilege_escalation_results = {}\n resource_exposure_results = []\n data_exfiltration_results = []\n\n # Get it from each inline policy\n if self.inline_policies:\n for inline_policy in self.inline_policies:\n # Privilege Escalation\n if inline_policy.policy_document.allows_privilege_escalation:\n for entry in inline_policy.policy_document.allows_privilege_escalation:\n if entry[\"type\"] not in privilege_escalation_results.keys():\n privilege_escalation_results[entry[\"type\"]] = entry[\"actions\"]\n # Resource Exposure\n if inline_policy.policy_document.permissions_management_without_constraints:\n for action in inline_policy.policy_document.permissions_management_without_constraints:\n if action not in resource_exposure_results:\n resource_exposure_results.append(action)\n # Data Exfiltration\n if inline_policy.policy_document.allows_data_exfiltration_actions:\n for action in inline_policy.policy_document.allows_data_exfiltration_actions:\n if action not in data_exfiltration_results:\n data_exfiltration_results.append(action)\n\n if self.attached_managed_policies:\n for managed_policy in self.attached_managed_policies:\n # Privilege Escalation\n if managed_policy.policy_document.allows_privilege_escalation:\n for entry in managed_policy.policy_document.allows_privilege_escalation:\n if entry[\"type\"] not in privilege_escalation_results.keys():\n privilege_escalation_results[entry[\"type\"]] = entry[\"actions\"]\n # Resource Exposure\n if managed_policy.policy_document.permissions_management_without_constraints:\n for action in managed_policy.policy_document.permissions_management_without_constraints:\n if action not in resource_exposure_results:\n resource_exposure_results.append(action)\n # Data Exfiltration\n if managed_policy.policy_document.allows_data_exfiltration_actions:\n for action in managed_policy.policy_document.allows_data_exfiltration_actions:\n if action not in data_exfiltration_results:\n data_exfiltration_results.append(action)\n\n # turn it into a list because we want to be able to count the number of results\n these_privilege_escalation_results = []\n\n for key in privilege_escalation_results:\n result = {\n \"type\": key,\n \"actions\": privilege_escalation_results[key]\n }\n these_privilege_escalation_results.append(result)\n\n resource_exposure_results.sort()\n data_exfiltration_results.sort()\n\n results = {\n \"PrivilegeEscalation\": these_privilege_escalation_results,\n \"ResourceExposure\": resource_exposure_results,\n \"DataExfiltration\": data_exfiltration_results,\n }\n return results",
"def list_partitions(self, partitioning):\n return []",
"def get_protection_policies(cohesity_client):\n policy_list = cohesity_client.protection_policies.get_protection_policies()\n policy_list = policy_list if policy_list else []\n for policy in policy_list:\n exported_res_dict[\"Protection Policies\"].append(policy.name)\n return policy_list",
"def get_all_disks():\n return DISKS_API.get(abs_link=False)",
"def get_policies():\r\n policy = policies.values()\r\n return policy",
"def basic_stabilizers(self):\n\n if self._transversals == []:\n self.schreier_sims()\n strong_gens = self._strong_gens\n base = self._base\n if not base: # e.g. if self is trivial\n return []\n strong_gens_distr = _distribute_gens_by_base(base, strong_gens)\n basic_stabilizers = []\n for gens in strong_gens_distr:\n basic_stabilizers.append(PermutationGroup(gens))\n return basic_stabilizers",
"def list_policies(policies, verbosity):\n print()\n if verbosity < 1:\n rows = []\n for p in sorted_by_name(policies):\n rows.append((p.name, p.generator, p.length, p.frequency))\n print_table(('NAME', 'GEN', 'LEN', 'FREQ'), rows)\n else:\n for policy in sorted_by_name(policies):\n chars = NONE\n if policy.disallowed_characters:\n chars = ''.join(sorted(policy.disallowed_characters))\n print_detail(\n policy.name, (\n ('description', nullable(policy.description)),\n ('specs', get_policy_specs(policy)),\n ('∅ chars', chars),\n ),\n )\n print()",
"def test_list_all_response_descriptor_policies_machine_policy_machine_policy_resource_spaces(self):\n pass",
"def get_disks():\n disks = []\n\n try:\n # Run script\n result = run_diskpart(['list disk'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append disk numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Disk (\\d+)\\s+\\w+\\s+(\\d+\\s+\\w+)', output):\n num = tmp[0]\n size = human_readable_size(tmp[1])\n disks.append({'Number': num, 'Size': size})\n\n return disks",
"def getOrderedPartitionList(whichTables):\n if not whichTables:\n return []\n order = socorro_pri.dependencyOrder(databaseDependenciesForPartition,whichTables)\n return order",
"def disk_partitions(all=False):\n phydevs = []\n f = open(\"/proc/filesystems\", \"r\")\n for line in f:\n if not line.startswith(\"nodev\"):\n phydevs.append(line.strip())\n\n retlist = []\n f = open('/etc/mtab', \"r\")\n for line in f:\n if not all and line.startswith('none'):\n continue\n fields = line.split()\n device = fields[0]\n mountpoint = fields[1]\n fstype = fields[2]\n if not all and fstype not in phydevs:\n continue\n if device == 'none':\n device = ''\n ntuple = disk_ntuple(device, mountpoint, fstype)\n retlist.append(ntuple)\n return retlist",
"def available_policies(self):\n return tuple(self._policies.keys())",
"def get_partitions(disk):\n partitions = []\n script = [\n 'select disk {}'.format(disk['Number']),\n 'list partition']\n\n try:\n # Run script\n result = run_diskpart(script)\n except subprocess.CalledProcessError:\n pass\n else:\n # Append partition numbers\n output = result.stdout.decode().strip()\n regex = r'Partition\\s+(\\d+)\\s+\\w+\\s+(\\d+\\s+\\w+)\\s+'\n for tmp in re.findall(regex, output, re.IGNORECASE):\n num = tmp[0]\n size = human_readable_size(tmp[1])\n partitions.append({'Number': num, 'Size': size})\n\n return partitions",
"def getzKVMdisks():\n result = []\n\n devices = getAllHardDisks()\n\n # get disk that has 7 partitions\n for dev in devices:\n parts = getPartitions(dev)\n\n if len(parts) == 7:\n result.append(dev)\n\n return result",
"def parameter_space():\n return [list(range(7, 17)),\n list(range(17, 27)),\n list(range(27, 37)),\n list(permutations(range(1, 5), 4))]",
"def partitions(cls) -> list[None | dict[str, str]]: # noqa: N805\n partitions = []\n if hasattr(cls, \"years\") and hasattr(cls, \"states\"):\n partitions = [\n {\"year\": year, \"state\": state}\n for year, state in itertools.product(cls.years, cls.states)\n ]\n elif hasattr(cls, \"years\"):\n partitions = [{\"year\": part} for part in cls.years]\n return partitions",
"def specs(self) -> Dict[int, PartitionSpec]:\n return {spec.spec_id: spec for spec in self.metadata.partition_specs}",
"def _GetSpecsForSchema(self, schema):\n specs = []\n settings = Gio.Settings.new(schema)\n for action in settings.keys():\n # Handle custom keybindings specially.\n if action == 'custom-list':\n custom_keys = settings.get_strv(action)\n if custom_keys:\n specs += self._GetSpecsForCustomBindingSchema(schema,\n custom_keys)\n elif settings.get_value(action).is_of_type(self._keybinding_type):\n keybindings = settings.get_strv(action)\n if keybindings:\n specs += [self._BuildSpec(schema, action, binding)\n for binding in keybindings if binding]\n return specs",
"def inspect_storage_objects_for_debugging(k8s_ctx: str, dry_run: bool = False):\n cmd = f'kubectl --context={k8s_ctx} get pv,pvc -o=NAME'\n for storage_obj in run_commands([cmd], dry_run):\n cmd = f'kubectl --context={k8s_ctx} describe {storage_obj}'\n if dry_run:\n logging.debug(cmd)\n else:\n p = safe_exec(cmd)\n if p.stdout:\n for line in p.stdout.decode().split('\\n'):\n if line.startswith(\"Status\") or line.startswith(\"Finalizers\"):\n logging.debug(f'{storage_obj} {line}')",
"def mpt():\n lbl_drives = ['device','mountpoint','fstype']\n disks = [d[0:3] for d in psutil.disk_partitions()]\n drives = [dict(zip(lbl_drives,ds)) for ds in disks]\n return [d['mountpoint']for d in drives]"
] | [
"0.56328917",
"0.53854275",
"0.53334033",
"0.53093815",
"0.5276727",
"0.5263089",
"0.52455497",
"0.5224897",
"0.5166511",
"0.5132029",
"0.51318884",
"0.5120451",
"0.5088118",
"0.5072572",
"0.504253",
"0.5033381",
"0.50171",
"0.50147575",
"0.5010506",
"0.49924254",
"0.49893463",
"0.49666604",
"0.49523902",
"0.49193403",
"0.49081263",
"0.49057606",
"0.49028972",
"0.48817497",
"0.4877368",
"0.48726767"
] | 0.682289 | 0 |
Check if disk is in vm disks collection | def check_disk_visibility(disk, disks_list):
is_visible = disk in [disk_obj.get_alias() for disk_obj in disks_list]
return is_visible | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_partition(disk): #TODO: Could change to use \"Whole\" attrib. Good idea?\n\n return \"s\" in disk.split(\"disk\")[1]",
"def show_vdisk(client, resource_group_name, vm_name, disk_name):\n virtual_machine = client.get(resource_group_name, vm_name)\n for disk in virtual_machine.disks:\n if disk.virtual_disk_name == disk_name:\n return disk\n return None",
"def checkDiskExists(positive, disk, attr='name'):\n try:\n DISKS_API.find(disk, attr)\n except EntityNotFound:\n return not positive\n return positive",
"def is_virtualized (self):\n return len([i for i in self.infras if\n i.infra_type not in (self.TYPE_INFRA_SDN_SW, self.TYPE_INFRA_EE,\n self.TYPE_INFRA_STATIC_EE)]) > 0",
"def is_booted_storage_device(disk):\n cmdline = (\"grep -w /ahcexport /proc/mounts | cut -d ' ' -f 1 | \"\n \"sed -e 's/[0-9]*//g'\")\n if '/dev/' not in disk:\n disk = '/dev/%s' % disk\n grep_cmd = subprocess.Popen(cmdline,\n shell=True, stdout=subprocess.PIPE)\n for booted_disk in grep_cmd.stdout:\n booted_disk = booted_disk.decode(errors='ignore')\n booted_disk = booted_disk.rstrip('\\n').strip()\n if booted_disk == disk:\n return True\n return False",
"def check_disk_usage(disk):\n du= shutil.disk_usage(disk)\n free =du.free/du.total * 100\n return free > 30",
"def check_disk_usage(disk):\n du = shutil.disk_usage(disk)\n free = du.free / du.total * 100\n return free > 20",
"def has_disk_dev(mapping, disk_dev):\n\n for disk in mapping:\n info = mapping[disk]\n if info['dev'] == disk_dev:\n return True\n return False",
"def UseExistingBootDisk(disks):\n return any(disk.get('boot', False) for disk in disks)",
"async def check_disks_availability(self, disks, allow_duplicate_serials):\n verrors = ValidationErrors()\n disks_cache = dict(map(lambda x: (x['devname'], x), await self.middleware.call('disk.query')))\n\n disks_set = set(disks)\n disks_not_in_cache = disks_set - set(disks_cache.keys())\n if disks_not_in_cache:\n verrors.add(\n 'topology',\n f'The following disks were not found in system: {\",\" .join(disks_not_in_cache)}.'\n )\n\n disks_reserved = await self.middleware.call('disk.get_reserved')\n already_used = disks_set - (disks_set - set(disks_reserved))\n if already_used:\n verrors.add(\n 'topology',\n f'The following disks are already in use: {\",\" .join(already_used)}.'\n )\n\n if not allow_duplicate_serials and not verrors:\n serial_to_disk = defaultdict(set)\n for disk in disks:\n serial_to_disk[(disks_cache[disk]['serial'], disks_cache[disk]['lunid'])].add(disk)\n for reserved_disk in disks_reserved:\n reserved_disk_cache = disks_cache.get(reserved_disk)\n if not reserved_disk_cache:\n continue\n\n serial_to_disk[(reserved_disk_cache['serial'], reserved_disk_cache['lunid'])].add(reserved_disk)\n\n if duplicate_serials := {serial for serial, serial_disks in serial_to_disk.items()\n if len(serial_disks) > 1}:\n error = ', '.join(map(lambda serial: f'{serial[0]!r} ({\", \".join(sorted(serial_to_disk[serial]))})',\n duplicate_serials))\n verrors.add('topology', f'Disks have duplicate serial numbers: {error}.')\n\n return verrors",
"def is_mounted(device):\n\n partitions = psutil.disk_partitions()\n device_path = \"/dev/\" + device\n for i in partitions:\n if i.device == device_path:\n return True\n return False",
"def check_disk_space(self):\n mm = MicroManager(self.hostname)\n drives = mm.get_disks()\n env = mm.get_env()\n for drive in drives:\n if drive['Name'].startswith(env['HOMEDRIVE']):\n if drive['TotalFreeSpace'] >= 367001600:\n return [CheckStatus(self, CheckStatus.CHECK_DISK_SPACE, CheckStatus.STATUS_PASS), ]\n else:\n return [CheckStatus(self, CheckStatus.CHECK_DISK_SPACE, CheckStatus.STATUS_FAIL, \"Only {} bytes of available disk space remain, expecting at least 367001600\"), ]",
"def _PreCheckDisks(self, ispec):\n self.diskparams = self.cfg.GetInstanceDiskParams(self.instance)\n\n inst_nodes = self.cfg.GetInstanceNodes(self.instance.uuid)\n excl_stor = compat.any(\n list(rpc.GetExclusiveStorageForNodes(self.cfg, inst_nodes).values())\n )\n\n # Get the group access type\n node_info = self.cfg.GetNodeInfo(self.instance.primary_node)\n node_group = self.cfg.GetNodeGroup(node_info.group)\n group_disk_params = self.cfg.GetGroupDiskParams(node_group)\n\n group_access_types = dict(\n (dt, group_disk_params[dt].get(\n constants.RBD_ACCESS, constants.DISK_KERNELSPACE))\n for dt in constants.DISK_TEMPLATES)\n\n # Check disk modifications. This is done here and not in CheckArguments\n # (as with NICs), because we need to know the instance's disk template\n ver_fn = lambda op, par: self._VerifyDiskModification(op, par, excl_stor,\n group_access_types)\n # Don't enforce param types here in case it's an ext disk added. The check\n # happens inside _VerifyDiskModification.\n self._CheckMods(\"disk\", self.op.disks, {}, ver_fn)\n\n self.diskmod = PrepareContainerMods(self.op.disks, None)\n\n def _PrepareDiskMod(_, disk, params, __):\n disk.name = params.get(constants.IDISK_NAME, None)\n\n # Verify disk changes (operating on a copy)\n inst_disks = self.cfg.GetInstanceDisks(self.instance.uuid)\n disks = copy.deepcopy(inst_disks)\n ApplyContainerMods(\"disk\", disks, None, self.diskmod, None, None,\n _PrepareDiskMod, None, None)\n utils.ValidateDeviceNames(\"disk\", disks)\n if len(disks) > constants.MAX_DISKS:\n raise errors.OpPrereqError(\"Instance has too many disks (%d), cannot add\"\n \" more\" % constants.MAX_DISKS,\n errors.ECODE_STATE)\n disk_sizes = [disk.size for disk in inst_disks]\n disk_sizes.extend(params[\"size\"] for (op, idx, params, private) in\n self.diskmod if op == constants.DDM_ADD)\n ispec[constants.ISPEC_DISK_COUNT] = len(disk_sizes)\n ispec[constants.ISPEC_DISK_SIZE] = disk_sizes\n\n # either --online or --offline was passed\n if self.op.offline is not None:\n if self.op.offline:\n msg = \"can't change to offline without being down first\"\n else:\n msg = \"can't change to online (down) without being offline first\"\n CheckInstanceState(self, self.instance, INSTANCE_NOT_RUNNING,\n msg=msg)",
"def is_part_of_disk(part_device_path, disk_device_path):\n is_part_of_disk = False\n\n if disk_device_path in part_device_path:\n is_part_of_disk = True\n elif constants.DEVICE_NAME_MPATH in disk_device_path:\n path_split = disk_device_path.split(constants.DEVICE_NAME_MPATH)\n if (path_split[0] in part_device_path and\n path_split[1] in part_device_path):\n is_part_of_disk = True\n\n return is_part_of_disk",
"def list_vdisks(client, resource_group_name, vm_name):\n virtual_machine = client.get(resource_group_name, vm_name)\n return virtual_machine.disks",
"def _VerifyDiskModification(self, op, params, excl_stor, group_access_types):\n disk_type = params.get(\n constants.IDISK_TYPE,\n self.cfg.GetInstanceDiskTemplate(self.instance.uuid))\n\n if op == constants.DDM_ADD:\n params[constants.IDISK_TYPE] = disk_type\n\n if disk_type == constants.DT_DISKLESS:\n raise errors.OpPrereqError(\n \"Must specify disk type on diskless instance\", errors.ECODE_INVAL)\n\n if disk_type != constants.DT_EXT:\n utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)\n\n mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)\n if mode not in constants.DISK_ACCESS_SET:\n raise errors.OpPrereqError(\"Invalid disk access mode '%s'\" % mode,\n errors.ECODE_INVAL)\n\n size = params.get(constants.IDISK_SIZE, None)\n if size is None:\n raise errors.OpPrereqError(\"Required disk parameter '%s' missing\" %\n constants.IDISK_SIZE, errors.ECODE_INVAL)\n size = int(size)\n\n params[constants.IDISK_SIZE] = size\n name = params.get(constants.IDISK_NAME, None)\n if name is not None and name.lower() == constants.VALUE_NONE:\n params[constants.IDISK_NAME] = None\n\n # These checks are necessary when adding and attaching disks\n if op in (constants.DDM_ADD, constants.DDM_ATTACH):\n CheckSpindlesExclusiveStorage(params, excl_stor, True)\n # If the disk is added we need to check for ext provider\n if op == constants.DDM_ADD:\n CheckDiskExtProvider(params, disk_type)\n\n # Make sure we do not add syncing disks to instances with inactive disks\n if not self.op.wait_for_sync and not self.instance.disks_active:\n raise errors.OpPrereqError(\"Can't %s a disk to an instance with\"\n \" deactivated disks and --no-wait-for-sync\"\n \" given\" % op, errors.ECODE_INVAL)\n\n # Check disk access param (only for specific disks)\n if disk_type in constants.DTS_HAVE_ACCESS:\n access_type = params.get(constants.IDISK_ACCESS,\n group_access_types[disk_type])\n if not IsValidDiskAccessModeCombination(self.instance.hypervisor,\n disk_type, access_type):\n raise errors.OpPrereqError(\"Selected hypervisor (%s) cannot be\"\n \" used with %s disk access param\" %\n (self.instance.hypervisor, access_type),\n errors.ECODE_STATE)\n\n if op == constants.DDM_ATTACH:\n if len(params) != 1 or ('uuid' not in params and\n constants.IDISK_NAME not in params):\n raise errors.OpPrereqError(\"Only one argument is permitted in %s op,\"\n \" either %s or uuid\" % (constants.DDM_ATTACH,\n constants.IDISK_NAME,\n ),\n errors.ECODE_INVAL)\n self._CheckAttachDisk(params)\n\n elif op == constants.DDM_MODIFY:\n if constants.IDISK_SIZE in params:\n raise errors.OpPrereqError(\"Disk size change not possible, use\"\n \" grow-disk\", errors.ECODE_INVAL)\n\n disk_info = self.cfg.GetInstanceDisks(self.instance.uuid)\n\n # Disk modification supports changing only the disk name and mode.\n # Changing arbitrary parameters is allowed only for ext disk template\",\n if not utils.AllDiskOfType(disk_info, [constants.DT_EXT]):\n utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)\n else:\n # We have to check that the 'access' and 'disk_provider' parameters\n # cannot be modified\n for param in [constants.IDISK_ACCESS, constants.IDISK_PROVIDER]:\n if param in params:\n raise errors.OpPrereqError(\"Disk '%s' parameter change is\"\n \" not possible\" % param,\n errors.ECODE_INVAL)\n\n name = params.get(constants.IDISK_NAME, None)\n if name is not None and name.lower() == constants.VALUE_NONE:\n params[constants.IDISK_NAME] = None\n\n if op == constants.DDM_REMOVE and not self.op.hotplug:\n 
CheckInstanceState(self, self.instance, INSTANCE_NOT_RUNNING,\n msg=\"can't remove volume from a running instance\"\n \" without using hotplug\")",
"def _is_disk_checking_required(cls, node):\n if (node.status in (consts.NODE_STATUSES.ready,\n consts.NODE_STATUSES.deploying,\n consts.NODE_STATUSES.provisioned) or\n (node.status == consts.NODE_STATUSES.error and\n node.error_type != consts.NODE_ERRORS.provision)):\n return False\n\n return True",
"def disk_is_gpt(device_node):\n sfdisk_command = '{} {}'.format('sfdisk -l', device_node)\n sfdisk_process = subprocess.Popen(\n sfdisk_command, stdout=subprocess.PIPE, shell=True,\n universal_newlines=True)\n sfdisk_output = sfdisk_process.stdout.read()\n return bool(re.search('Disklabel type: gpt', sfdisk_output))",
"def is_virtual_vol_in_use(self, virtualvol):\n try:\n get_map = self.maps.get_map(virtualvol)\n except (utils.ApiException, ValueError, TypeError) as err:\n msg = \"Could not get the map view of {0} due to \"\n err_msg = msg.format(virtualvol) + \"error {0}\"\n e_msg = utils.display_error(err_msg, err)\n LOG.error(\"%s\\n%s\\n\", e_msg, err)\n self.module.fail_json(msg=e_msg)\n\n vview_list = utils.serialize_content(get_map)\n # Collect the storage view if it has virtual volume\n if len(vview_list['parents']) > 0:\n return True\n return False",
"def validDisk(disk_c, disk_old):\n\n return disk_old[0] < disk_c[0] and disk_old[1] < disk_c[1] and disk_old[2] < disk_c[2]",
"def canHostVCDN(self, AvCDN):\n\t\t\n\t\ttry:\n\t\t\tret = \t( \n\t\t\t\t((self.totalDisk - self.curDisk) > AvCDN.vDisk)\n\t\t\t\t)\n\t\t\t\t\n\t\t\t\t### In our Test Infraestructure, the OpenStacks are already to the limit, so the full condition is removed\n\t\t\t\t\n\t\t\t\t#((self.totalDisk - self.curDisk) > AvCDN.vDisk) and \\\n\t\t\t\t#((self.totalRAM - self.curRAM) > AvCDN.vRAM ) and \\\n\t\t\t\t#((self.totalCPU - self.curCPU) > AvCDN.vCPU) and \\\n\t\t\t\t#((self.totalNetBW - self.curNetBW) > AvCDN.vNetBW)\n\t\t\t\t\n\t\texcept TypeError:\n\t\t\t\n\t\t\t#self.curDisk is a value obtained from OpenStack. If at any rate this value is NULL \n\t\t\t#\t(because of problems getting OpenStack info), then we assume full capactity and return True\n\t\t\t\n\t\t\tret = True\n\t\treturn ret",
"def is_held(dev):\n assert os.path.exists(dev)\n dev = os.path.realpath(dev)\n base = get_dev_name(dev)\n\n # full disk?\n directory = '/sys/block/{base}/holders'.format(base=base)\n if os.path.exists(directory):\n return os.listdir(directory)\n\n # partition?\n part = base\n while len(base):\n directory = '/sys/block/{base}/{part}/holders'.format(part=part, base=base)\n if os.path.exists(directory):\n return os.listdir(directory)\n base = base[:-1]\n return []",
"def _is_booted_from_volume(self, instance, disk_mapping=None):\n return not bool(instance.get('image_ref'))",
"def list_disks(self, instance_name):\n return ['A_DISK']",
"def getGuestDisk(self, oSession, oTxsSession, eStorageController):\n lstDisks = None;\n\n # The naming scheme for NVMe is different and we don't have\n # to query the guest for unformatted disks here because the disk with the OS\n # is not attached to a NVMe controller.\n if eStorageController == vboxcon.StorageControllerType_NVMe:\n lstDisks = [ '/dev/nvme0n1' ];\n else:\n # Find a unformatted disk (no partition).\n # @todo: This is a hack because LIST and STAT are not yet implemented\n # in TXS (get to this eventually)\n lstBlkDev = [ '/dev/sda', '/dev/sdb' ];\n for sBlkDev in lstBlkDev:\n fRc = oTxsSession.syncExec('/usr/bin/ls', ('ls', sBlkDev + '1'));\n if not fRc:\n lstDisks = [ sBlkDev ];\n break;\n\n _ = oSession;\n return lstDisks;",
"def partitions_are_in_order(disk_partitions, requested_partitions):\n\n partitions_nr = []\n\n for dp in disk_partitions:\n part_number = get_part_number(dp.get('device_path'))\n partitions_nr.append(int(part_number))\n\n for rp in requested_partitions:\n part_number = get_part_number(rp.get('device_path'))\n partitions_nr.append(int(part_number))\n\n return sorted(partitions_nr) == range(min(partitions_nr),\n max(partitions_nr) + 1)",
"def get_disk_type(vm_):\n return config.get_cloud_config_value(\n \"disk_type\", vm_, __opts__, default=\"HDD\", search_global=False\n )",
"def disk(self, disk_id):\n try:\n return self._disks[disk_id]\n except KeyError:\n util.log_error(\n \"couldn't find disk {} on vm {}\".format(disk_id, self.id)\n )\n raise",
"def check_filesystem(ssh_connection, disk_fmt, disk):\n if disk_fmt == \"squashfs\":\n return\n cmd = \"fsck.{} -n {}\".format(disk_fmt, disk)\n exit_code, _, stderr = ssh_connection.run(cmd)\n assert exit_code == 0, stderr",
"def booted_from_volume(volumes_list):\n if any('/dev/vda' in volume['attachments'] for volume in\n volumes_list):\n return True\n return False"
] | [
"0.642015",
"0.6173205",
"0.6010352",
"0.59365094",
"0.5920557",
"0.588467",
"0.5883323",
"0.5865767",
"0.58631855",
"0.5849528",
"0.581248",
"0.57283443",
"0.563945",
"0.5629074",
"0.5617215",
"0.55904806",
"0.5552183",
"0.5539992",
"0.55187285",
"0.5498951",
"0.54850733",
"0.5467647",
"0.5464022",
"0.5443361",
"0.5421549",
"0.54068005",
"0.5387623",
"0.5371184",
"0.53526455",
"0.52756536"
] | 0.71183264 | 0 |
Gets the disk storage domain name __author__ = "ratamir" | def get_disk_storage_domain_name(disk_name, vm_name=None, template_name=None):
if vm_name and template_name:
logger.error(
"Only one of the parameters vm_name or template_name "
"should be provided"
)
return None
logger.info("Get disk %s storage domain", disk_name)
if vm_name is None and template_name is None:
disk = DISKS_API.find(disk_name)
elif vm_name is not None:
disk = getVmDisk(vm_name, disk_name)
else:
disk = getTemplateDisk(template_name, disk_name)
sd_id = disk.get_storage_domains().get_storage_domain()[0].get_id()
disk_sd_name = STORAGE_DOMAIN_API.find(sd_id, 'id').get_name()
logger.info("Disk %s storage domain is: %s", disk_name, disk_sd_name)
return disk_sd_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_disk_name():\n return \"%s.dat.disk\" % getpass.getuser()",
"def bucket_domain_name(self) -> str:\n ...",
"def GetDataDiskName(cls, instance):\n name = cls.DATA_DISK_NAME_FMT.format(instance=instance)\n return cls._FormalizeName(name)",
"def get_disk_file_name():\n return \"%s/%s\" % (get_user_homedir(), get_disk_name())",
"def bucket_dual_stack_domain_name(self) -> str:\n ...",
"def storage_account_name(self) -> str:\n return pulumi.get(self, \"storage_account_name\")",
"def name(self):\n _LOGGER.debug(self._shelly_cloud_device_name + ' >>> ' +\n self._shelly_cloud_entity_name + ' >>> name() >>> ' +\n self._shelly_cloud_device_name)\n return self._shelly_cloud_device_name",
"def _get_disk_name(disk_type, instance, short=False):\n prefix = '%s_' % (disk_type[0] if short else disk_type)\n base = ('%s_%s' % (instance.name[:8], instance.uuid[:4]) if short\n else instance.name)\n return pvm_util.sanitize_file_name_for_api(\n base, prefix=prefix, max_len=pvm_const.MaxLen.VDISK_NAME if short\n else pvm_const.MaxLen.FILENAME_DEFAULT)",
"def domain_name(self) -> str:\n return pulumi.get(self, \"domain_name\")",
"def domain_name(self) -> str:\n return pulumi.get(self, \"domain_name\")",
"def module_name(self):\n return \"Storage\"",
"def domain_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain_name\")",
"def domain_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain_name\")",
"def domain_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain_name\")",
"def _get_sd_id(name):\n cohesity_client = _get_client()\n log.info(\"Getting storage domain with name %s\", name)\n resp = cohesity_client.view_boxes.get_view_boxes(names=name)\n if resp:\n return resp[0].id",
"def get_dev_name(self):\n\t\treturn call_sdk_function('PrlSrvCfgHdd_GetDevName', self.handle)",
"def bucket_domain_name(self) -> str:\n return jsii.get(self, \"bucketDomainName\")",
"def bucket_domain_name(self) -> str:\n return jsii.get(self, \"bucketDomainName\")",
"def domain_dns_name(self):\n domain_dn = self.get_default_basedn()\n return domain_dn.canonical_str().split('/')[0]",
"def disk(self):\n return self.__disk",
"def getSlavename():",
"def get_pd_host(self, conn, pd, zone):\n response = conn.disks().get(project=PROJECT, zone=zone, disk=pd).execute()\n owner = response.get('users', '')\n if len(owner) > 0:\n return owner[0].split(\"/\")[-1]\n else:\n return \"\"",
"def storage_account(self) -> str:\n return pulumi.get(self, \"storage_account\")",
"def get_domain_name(self):\n return self.domain_name.get_text()",
"def get_sduuid(disk_object):\n return disk_object.get_storage_domains().get_storage_domain()[0].get_id()",
"def get_disk_name(self):\n self.clear_screen()\n default = 'nvme0n1'\n logging.info('ensure disknames are absolutely available. Otherwise OpenShift install fails')\n master_install_device = input('specify the master device that will be installed\\n'\n 'default [nvme0n1]: ')\n master_install_device = set_values(master_install_device, default)\n bootstrap_install_device = input('specify the bootstrap device that will be installed\\n'\n 'default [nvme0n1]: ')\n bootstrap_install_device = set_values(bootstrap_install_device, default)\n worker_install_device = input('specify the worker device that will be installed\\n'\n 'default [nvme0n1]: ')\n worker_install_device = set_values(worker_install_device, default)\n logging.info('adding master_install_device: {} bootstrap_install_device: {}\\\n worker_install_device: {}'.format(master_install_device, bootstrap_install_device,\n worker_install_device))\n self.inventory_dict['csah']['vars']['master_install_device'] = master_install_device\n self.inventory_dict['csah']['vars']['bootstrap_install_device'] = bootstrap_install_device\n self.inventory_dict['csah']['vars']['worker_install_device'] = worker_install_device",
"def domain(cls) -> str:\n return f'{cls.name}.wikimedia.org'",
"def get_kb_location(self):\n return ['dav',]",
"def dm_name(self):\n if self._dm_name is not None:\n return self._dm_name\n if not self.exists:\n return None\n if not os.path.exists(self.sysfs_dm_name_file):\n return None\n self.retr_dm_name()\n return self._dm_name",
"def dc_name(self):\n return self.container_name"
] | [
"0.6967801",
"0.65831107",
"0.6512975",
"0.6477021",
"0.63847244",
"0.63788724",
"0.6315478",
"0.6300419",
"0.6270765",
"0.6270765",
"0.6262309",
"0.61498886",
"0.61498886",
"0.61498886",
"0.61462027",
"0.6126907",
"0.61219066",
"0.61219066",
"0.61067355",
"0.61029404",
"0.6074433",
"0.6064661",
"0.6063925",
"0.6052987",
"0.6049563",
"0.6045422",
"0.6015102",
"0.60130817",
"0.6005014",
"0.59520316"
] | 0.6622522 | 1 |
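The row above pairs a docstring query with a helper that resolves a disk's storage domain through one of three lookups (a VM's disk, a template's disk, or the floating-disk collection) and then maps the storage-domain id to its name. As a purely illustrative aside, here is a minimal, self-contained sketch of that lookup order using hypothetical in-memory dictionaries in place of the DISKS_API / STORAGE_DOMAIN_API wrappers; none of these names come from the dataset itself.

```python
# Hypothetical in-memory stand-ins for the engine API wrappers used in the row above.
STORAGE_DOMAINS = {"sd-1": "iscsi_0", "sd-2": "nfs_0"}           # sd id -> sd name
VM_DISKS = {("vm_a", "disk_1"): "sd-1"}                          # (vm, disk) -> sd id
TEMPLATE_DISKS = {("tmpl_a", "disk_2"): "sd-2"}                  # (template, disk) -> sd id
FLOATING_DISKS = {"disk_3": "sd-1"}                              # unattached disks -> sd id


def disk_storage_domain_name(disk_name, vm_name=None, template_name=None):
    """Resolve the storage-domain name of a disk, mirroring the lookup order above."""
    if vm_name and template_name:
        raise ValueError("pass either vm_name or template_name, not both")
    if vm_name is not None:
        sd_id = VM_DISKS[(vm_name, disk_name)]
    elif template_name is not None:
        sd_id = TEMPLATE_DISKS[(template_name, disk_name)]
    else:
        sd_id = FLOATING_DISKS[disk_name]
    return STORAGE_DOMAINS[sd_id]


if __name__ == "__main__":
    print(disk_storage_domain_name("disk_1", vm_name="vm_a"))          # iscsi_0
    print(disk_storage_domain_name("disk_2", template_name="tmpl_a"))  # nfs_0
    print(disk_storage_domain_name("disk_3"))                          # iscsi_0
```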
Export a disk to glance repository | def export_disk_to_glance(
positive, disk, target_domain, async=False, attr='id'
):
storage_domain = STORAGE_DOMAIN_API.find(target_domain)
disk = DISKS_API.find(disk, attribute=attr)
if not DISKS_API.syncAction(
disk, 'export', storage_domain=storage_domain, positive=positive,
async=async
):
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save(self, export_path: str):",
"def export_to_gsas():\n # Get workflow\n work_flow = my_data.get()\n\n output_file_name = '/tmp/acceptance_test.gda'\n\n # Clear the file if it exists.\n if os.path.exists(output_file_name):\n os.remove(output_file_name)\n\n status = work_flow.export_gsas_file(run_number=80231)\n assert status\n assert os.path.exists(output_file_name)",
"def test_export(self):\n structure = {\n \"README.rst\": \"Hi this is 1.0.0.\",\n \"twisted\": {\n \"newsfragments\": {\"README\": \"Hi this is 1.0.0\"},\n \"_version.py\": genVersion(\"twisted\", 1, 0, 0),\n \"web\": {\n \"newsfragments\": {\"README\": \"Hi this is 1.0.0\"},\n \"_version.py\": genVersion(\"twisted.web\", 1, 0, 0),\n },\n },\n }\n reposDir = self.makeRepository(self.tmpDir)\n self.createStructure(reposDir, structure)\n self.commitRepository(reposDir)\n\n exportDir = FilePath(self.mktemp()).child(\"export\")\n self.createCommand.exportTo(reposDir, exportDir)\n self.assertStructure(exportDir, structure)",
"def export_library(self):\n filename = tkFileDialog.asksaveasfilename(initialdir = self.cwd, title = \"Save glycan library\", filetypes = ((\"db files\",\"*.db\"),(\"all files\",\"*.*\")))\n self.export_glycans(filename, self.user_glycans)",
"def test_download_to_file(req, tmpdir):\n req.get(ENTREZ_URL, text='This works.')\n outdir = tmpdir.mkdir('outdir')\n filename = outdir.join('foo')\n expected = outdir.join('foo.gbk')\n config = core.Config(molecule='nucleotide', verbose=False)\n\n core.download_to_file('FOO', config, filename=filename)\n\n assert expected.check()",
"def save_model(model):\n model.to_disk(\"../model/custom_ner_model\")",
"def export_model(self, save_path: str, save_format: Optional[str] = None) -> None:",
"def export(bento_tag: str, out_path: str) -> None: # type: ignore (not accessed)\n bento = bento_store.get(bento_tag)\n out_path = bento.export(out_path)\n logger.info(\"%s exported to %s.\", bento, out_path)",
"def BT_export(self):\n src = os.path.join(self.resMan.base_path, Config.instance().weld_BT_root_folder)\n srcs=self.BTMan.get_subdirs(src)\n dst = os.path.join(self.project.rootdir, Config.instance().weld_BT_root_folder)\n #this operation has lots of exceptions to output...\n try:\n for src in srcs:\n self.BTMan.export(src, dst)\n except Exception, e:\n print >> sys.__stderr, 'ERROR in Weld.BT_export():'\n print >> sys.__stderr, e.args[0]\n print >> sys.__stderr, 'export cancelled (some cleanup might be needed in %s)' % dst",
"def file(c, path=local.http_path):\r\n c = conn(c)\r\n print(\"make file repo on {}, path [{}]\".format(c.host, path))\r\n\r\n system.install(c, 'createrepo')\r\n c.run('createrepo {}'.format(path))",
"def test_export(api):\n # upload file to file.io servers\n uploaded_file = api.upload(\n tag='test_file',\n expiry='1d',\n path='tests/test_file.txt'\n )\n\n # check that instance of FileIO has these fields\n assert uploaded_file.link\n assert uploaded_file.key\n assert uploaded_file.tag\n assert uploaded_file.path\n\n # check that the uploaded file was added to uploaded files list\n assert api.show_uploads()\n\n # testing that export works\n api.export('tests/files_data.json')\n\n # check that the exported file exists\n assert path.isfile('tests/files_data.json')\n\n remove('tests/files_data.json')\n\n # testing that export in pkl works\n api.export(out_type='pkl')\n\n # check that the exported file exists\n assert path.isfile('exported.pkl')\n\n remove('exported.pkl')\n\n # testong that export in pkl works\n api.export('tests/exported.pkl')\n\n # check that the exported file exists\n assert path.isfile('tests/exported.pkl')\n\n remove('tests/exported.pkl')\n\n # testing that export in json with default path works\n api.export()\n\n # check that exported file exists\n assert path.isfile('exported.json')\n\n remove('exported.json')\n\n # check that export with provided path works\n api.export('tests/exporte.d.pkl', out_type='json')\n\n # testing that export works\n assert path.isfile('tests/exporte.d.pkl.json')\n\n remove('tests/exporte.d.pkl.json')\n\n # check that export works correctly with strange path\n api.export('tests/t.e.s.t.p.k.l', out_type='pkl')\n\n # testing that export works\n assert path.isfile('tests/t.e.s.t.p.k.l.pkl')\n\n remove('tests/t.e.s.t.p.k.l.pkl')",
"def DoExport(options, args):\n if len(args) != 1:\n raise gclient_utils.Error(\"Need directory name\")\n client = GClient.LoadCurrentConfig(options)\n\n if not client:\n raise gclient_utils.Error(\"client not configured; see 'gclient config'\")\n\n if options.verbose:\n # Print out the .gclient file. This is longer than if we just printed the\n # client dict, but more legible, and it might contain helpful comments.\n print(client.ConfigContent())\n return client.RunOnDeps('export', args)",
"def save(variant='default', version='NEXT'):\n\n if env.shotgun:\n mc.warning('This feature is not available in a shotgun environment.')\n\n else:\n env.save_stream('rig', token=variant, version=version, workfile=0)",
"def do_export(args):\n outfile_name = args.outfile_name.split('.')\n outfile_ext = 'txt'\n if len(outfile_name) > 1:\n (outfile_name, outfile_ext) = outfile_name\n else:\n outfile_name = outfile_name[0]\n\n secret = subprocess.Popen(['gpg', '--export-secret-key', args.keyid], stdout=subprocess.PIPE)\n paperkey = subprocess.check_output(['paperkey', '--output-type', 'raw'], stdin=secret.stdout)\n base64str = base64.b64encode(paperkey)\n chunks = chunk_up(base64str, args.numfiles)\n\n for i, chunk in enumerate(chunks):\n if args.png:\n (_, _, image) = qrencode.encode_scaled(chunk, int(args.size))\n image.save('%s%d.png' % (outfile_name, i+1), 'PNG')\n if args.base64:\n with open('%s%d.%s' % (outfile_name, i+1, outfile_ext), 'wb') as txt_file:\n txt_file.write(chunk)",
"def test_download_terrascope():\n\n s1_belgium.download(\"sigma0_cube_terrascope.nc\",format=\"NetCDF\")",
"def export(ctx):\n LOG.info(\"Running scout export\")",
"def checkout(url, version=None):\n from grit import Repo\n r = Repo(url)\n\n def _write(item):\n log.debug('writing: %s' % item.name)\n if item.type != 'blob':\n return\n if r.type in ['repo', 'proxy', 'local']:\n path = os.path.join(r.name, item.path)\n pdir = os.path.dirname(path)\n if not os.path.isdir(pdir):\n os.makedirs(pdir)\n else:\n path = item.name\n\n f = open(path, 'w')\n f.write(item.data())\n f.close()\n\n if r.type == 'blob':\n _write(r)\n else:\n items = r.items()\n count = 1\n total = len(items)\n while count <= total:\n print '[%s/%s] %0.2f%%' %(count, total, (float(count) / total) * 100), '*'*count, '\\r',\n _write(items[count-1])\n count += 1\n sys.stdout.flush()\n print",
"def download_report():\n entities = get_names()\n save_csv(entities)",
"def download():\r\n reader = GSODDataReader()\r\n year_list = range(2001, 2012)\r\n austin = reader.collect_data(year_list, exact_station=True,\r\n station_name='AUSTIN CAMP MABRY', state='TX', country='US')\r\n houston = reader.collect_data(year_list, exact_station=True,\r\n station_name='HOUSTON/D.W. HOOKS', state='TX', country='US')\r\n new_york = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEW YORK/LA GUARDIA', state='NY', country='US')\r\n newark = reader.collect_data(year_list, exact_station=True,\r\n station_name='NEWARK INTL AIRPORT', state='NJ', country='US')\r\n punta_arenas = reader.collect_data(year_list, exact_station=True,\r\n station_name='PUNTA ARENAS', country='CH')\r\n wellington = reader.collect_data(year_list, exact_station=True,\r\n station_name='WELLINGTON AIRPORT', country='NZ')\r\n store = HDFStore('weather.h5')\r\n store['austin'] = austin\r\n store['houston'] = houston\r\n store['nyc'] = new_york\r\n store['newark'] = newark\r\n store['punta_arenas'] = punta_arenas\r\n store['wellington'] = wellington\r\n store.close()",
"def _download_sst(cls):\n path = Path(cls.dataset_path)\n if path.exists():\n return\n\n path.mkdir(parents=True, exist_ok=True)\n generic_download(\n url=\"https://s3.amazonaws.com/enso-data/SST-binary.csv\",\n text_column=\"Text\",\n target_column=\"Target\",\n filename=SST_FILENAME\n )",
"def _Download( self ):\n self._DownloadPipe += PackageUtil.ExecuteSimpleCommand( \"git\", [\"clone\", \"[email protected]:mastbaum/avalanche.git\", self.GetInstallPath()], None, os.getcwd() )\n return",
"def publish(self):\n # Write the models locally\n local_path_dist = self.dump_distributions()\n local_path_model = self.dump_model()\n\n # Write them to cloud storage\n bucket_path_dist = self.get_bucket_path(self.filename_distributions)\n bucket_path_model = self.get_bucket_path(self.filename_model)\n\n config = self.services.config\n lake = self.services.lake\n\n\n lake.upload(bucket_path_dist, local_path_dist, bucket_name=config.lake_bucket)\n lake.upload(bucket_path_model, local_path_model, bucket_name=config.lake_bucket)\n\n # Now finally we want to write our reference file to our repository and build a merge request\n reference = {\n \"model\": {\n \"bucket\": config.lake_bucket,\n \"path\": bucket_path_model,\n \"md5\": file_md5(local_path_model),\n },\n \"distributions\": {\n \"bucket\": config.lake_bucket,\n \"path\": bucket_path_dist,\n \"md5\": file_md5(local_path_dist),\n },\n }\n\n return reference",
"def save_to_disk(self, file_name = \"vehicle_classifier.pkl\"):\n self.classifier.save_to_disk(file_name)",
"def export_ctsdg(cfg):\n generator = Generator(\n image_in_channels=config.image_in_channels,\n edge_in_channels=config.edge_in_channels,\n out_channels=config.out_channels\n )\n generator.set_train(False)\n load_checkpoint(cfg.checkpoint_path, generator)\n\n ckpt_path = Path(cfg.checkpoint_path)\n output_file_name = (ckpt_path.parent / ckpt_path.stem).as_posix()\n file_format = config.file_format\n\n img_dummy = mnp.zeros([1, config.image_in_channels, *cfg.image_load_size],\n dtype=mstype.float32)\n edge_dummy = mnp.zeros([1, 2, *cfg.image_load_size], dtype=mstype.float32)\n mask_dummy = mnp.zeros([1, 1, *cfg.image_load_size], dtype=mstype.float32)\n\n export(generator, img_dummy, edge_dummy, mask_dummy,\n file_name=output_file_name, file_format=file_format)\n\n print(f'{output_file_name}.mindir exported successfully!', flush=True)",
"def export_card(self, repo, file_name, card_name, file_format='CSV'):\n DataHubManager.has_repo_file_privilege(\n self.username, self.repo_base, repo, 'write')\n\n card = Card.objects.get(repo_base=self.repo_base,\n repo_name=repo, card_name=card_name)\n query = card.query\n\n # to export a card, the user must be able to successfully execute\n # the query from their own database user.\n try:\n self.execute_sql(query)\n except Exception:\n raise PermissionDenied(\n 'Either missing required privileges or bad query')\n\n # create the user data folder if it doesn't already exist\n DataHubManager.create_user_data_folder(self.repo_base, repo)\n\n file_name = clean_file_name(file_name)\n file_path = user_data_path(\n self.repo_base, repo, file_name, file_format)\n\n self.user_con.export_query(query=query,\n file_path=file_path,\n file_format=file_format)",
"def _evolve2disk(self, **kwargs) -> str:\n raise NotImplementedError('evolve2disk is not yet implemented.')",
"def dest_repo_tree(dest_repo_no_tree):\n repo = dest_repo_no_tree\n\n # Create and commit a file\n fpath = os.path.join(repo.working_dir, \"something.txt\")\n with open(fpath, \"w\") as f:\n f.write(\"Mundul vult decipi, ergo decipiatur.\")\n repo.index.add([fpath])\n repo.index.commit(\"Dummy commit\")\n\n yield repo",
"def clone(args):\n osf = _setup_osf(args)\n project = osf.project(args.project)\n output_dir = args.project\n if args.output is not None:\n output_dir = args.output\n\n with tqdm(unit='files') as pbar:\n for store in project.storages:\n prefix = os.path.join(output_dir, store.name)\n\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n\n path = os.path.join(prefix, path)\n if os.path.exists(path) and args.update:\n if checksum(path) == file_.hashes.get('md5'):\n continue\n directory, _ = os.path.split(path)\n makedirs(directory, exist_ok=True)\n\n with open(path, \"wb\") as f:\n file_.write_to(f)\n\n pbar.update()",
"def extract_to_disk(self):\n archive_name, extension = os.path.splitext(os.path.basename(self.file.name))\n if not os.path.isdir(os.path.join(os.getcwd(), archive_name)):\n os.mkdir(archive_name)\n os.chdir(archive_name)\n for filename, data in self.extract().items():\n f = open(filename, 'wb')\n f.write(data or b'')\n f.close()",
"def export_to_gral(\n inventory: Inventory, grid: GralGrid, path: os.PathLike, polygon_raster_size\n) -> None:\n\n writer = EmissionWriter(Path(path), inventory, grid, polygon_raster_size)\n\n writer.write_gdfs()"
] | [
"0.61116695",
"0.59267706",
"0.5841884",
"0.5634184",
"0.5515597",
"0.5503389",
"0.54823846",
"0.54743654",
"0.5460788",
"0.53573567",
"0.53079253",
"0.5305249",
"0.52523184",
"0.5226656",
"0.52151257",
"0.5214028",
"0.5204235",
"0.5194055",
"0.51850533",
"0.5168795",
"0.51637876",
"0.51444614",
"0.5132751",
"0.51083386",
"0.5103356",
"0.5101712",
"0.50943446",
"0.50869143",
"0.50851685",
"0.5084363"
] | 0.6461685 | 0 |
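The export helper in the row above follows the positive/negative "sync action" convention seen throughout these rows: run an engine action and report whether the outcome matched the expected result. Below is a hedged, stand-alone sketch of that convention with a hypothetical export_disk stand-in rather than the real DISKS_API.syncAction call; it is an illustration, not part of the dataset.

```python
# Minimal sketch of the positive/negative sync-action pattern: run an action,
# compare its success against the expected outcome, and return a bool.
def sync_action(action, positive=True, **kwargs):
    """Return True when the action's success matches the expected `positive` flag."""
    try:
        action(**kwargs)
        succeeded = True
    except Exception:  # a real wrapper would catch the API's specific error type only
        succeeded = False
    return succeeded == positive


def export_disk(disk_id, target_domain):
    # Hypothetical stand-in for the engine call that exports a disk image.
    if not disk_id or not target_domain:
        raise ValueError("disk id and target storage domain are required")


if __name__ == "__main__":
    print(sync_action(export_disk, positive=True,
                      disk_id="disk-123", target_domain="export_sd"))  # True
    print(sync_action(export_disk, positive=False,
                      disk_id="", target_domain="export_sd"))          # True (failure expected)
```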
Get list of disk objects from API | def get_all_disks():
return DISKS_API.get(abs_link=False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getObjDisks(name, get_href=True, is_template=False):\n response = get_disk_attachments(\n name, 'template' if is_template else 'vm', get_href\n )\n if get_href:\n return response\n return get_disk_list_from_disk_attachments(response)",
"def fs_get_disk_list(self):\n\t\treturn Job(SDK.PrlSrv_FsGetDiskList(self.handle)[0])",
"def ListVdisks(self, headers=None, query_params=None, content_type=\"application/json\"):\n uri = self.client.base_url + \"/vdisks\"\n return self.client.get(uri, None, headers, query_params, content_type)",
"def list_filesystem(self, headers=None, **kwargs):\n logger.debug('Listing filesystem ...')\n resource = 'account'\n params = get_params(parameters=locals(), exclusions=['self', 'filesystem_identifier', 'headers'])\n response = self._get(params=params, headers=headers)\n return response.json() if response.content else {}",
"def get_disks():\n disks = []\n\n try:\n # Run script\n result = run_diskpart(['list disk'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append disk numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Disk (\\d+)\\s+\\w+\\s+(\\d+\\s+\\w+)', output):\n num = tmp[0]\n size = human_readable_size(tmp[1])\n disks.append({'Number': num, 'Size': size})\n\n return disks",
"async def get_disks(self, oid):\n pool = await self.query([('id', '=', oid)], {'get': True})\n if not pool['is_decrypted']:\n yield\n async for i in await self.middleware.call('zfs.pool.get_disks', pool['name']):\n yield i",
"def database_volume_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.Volume)\n\n volume_objs = list()\n for volume in query.all():\n nfvi_volume_data = json.loads(volume.nfvi_volume_data)\n nfvi_volume = nfvi.objects.v1.Volume(nfvi_volume_data['uuid'],\n nfvi_volume_data['name'],\n nfvi_volume_data['description'],\n nfvi_volume_data['avail_status'],\n nfvi_volume_data['action'],\n nfvi_volume_data['size_gb'],\n nfvi_volume_data['bootable'],\n nfvi_volume_data['encrypted'],\n nfvi_volume_data['image_uuid'])\n volume_obj = objects.Volume(nfvi_volume)\n volume_objs.append(volume_obj)\n return volume_objs",
"def ls(**params):\n params = _clean_salt_variables(params)\n\n endpoint = \"devices\"\n\n # Change endpoint if there are params to filter by:\n if params:\n endpoint = \"resources\"\n\n # Convert all ints to strings:\n for key, val in params.items():\n params[key] = str(val)\n\n api_response = requests.get(\n \"https://api.serverdensity.io/inventory/{}\".format(endpoint),\n params={\n \"token\": get_sd_auth(\"api_token\"),\n \"filter\": salt.utils.json.dumps(params),\n },\n )\n log.debug(\"Server Density API Response: %s\", api_response)\n log.debug(\"Server Density API Response content: %s\", api_response.content)\n if api_response.status_code == 200:\n try:\n return salt.utils.json.loads(api_response.content)\n except ValueError:\n log.error(\n \"Could not parse Server Density API Response content: %s\",\n api_response.content,\n )\n raise CommandExecutionError(\n \"Failed to create, Server Density API Response: {}\".format(api_response)\n )\n else:\n return None",
"def get_ceph_disk():\n disks = []\n for srv in get_srv_list():\n cfg = get_srv_config(srv)\n for key in ['osd_data', 'osd_journal', 'mds_data', 'mon_data']:\n mnt_point = cfg[key]\n disk = get_disk_by_mountpoint(find_mount_point(mnt_point))\n if disk not in disks:\n disks.append(disk)\n return disks",
"def files_storage_list(self, prefix='pipeline/', print_paths=False):\n\n return self.backend.files_storage_list(prefix=prefix, print_paths=print_paths)",
"def get_all(self, isystem_uuid=None, marker=None, limit=None,\n sort_key='id', sort_dir='asc'):\n\n return self._get_controller_fs_collection(isystem_uuid, marker, limit,\n sort_key, sort_dir)",
"def find_all():\n return ItopapiPrototype.find_all(ItopapiStorageSystem)",
"def get_all_disk():\n\t\tdisks = []\n\t\tdisks_lines = linux.exe_shell(\"lsblk -o NAME,VENDOR|grep -P '^sd.*[A-Z]'\")\n\t\tfor line in disks_lines.splitlines():\n\t\t\tdisk_t = line.split()\n\t\t\tif len(disk_t) > 1 and \"LSI\" not in disk_t[1]:\n\t\t\t\tdisks.append(disk_t[0])\n\t\tds = []\n\t\tfor i in disks:\n\t\t\td_t = DiskFromLsiSas3(\"\", i)\n\t\t\td_t.fill_attrs()\n\t\t\tds.append(d_t)\n\t\treturn ds",
"def ls(self, path):\n try:\n response = self._fetch_json('/api/ls' + path)\n if 'content' in response:\n return response['content']\n else:\n return [ response ]\n except HTTPError as err:\n raise RuntimeError(\"Unable to access to '{}'\".format(path))",
"async def fetch_file_list(client, bucket) -> List:\n # pylint: disable=invalid-name\n PG_HOSTNAME = config('PG_HOSTNAME')\n PG_DATABASE = config('PG_DATABASE')\n folder = f'backup/{PG_HOSTNAME}_{PG_DATABASE}'\n result = await client.list_objects_v2(Bucket=bucket, Prefix=folder)\n contents = result.get('Contents', None)\n file_list = list([])\n if contents:\n for content in contents:\n file_list.append(content.get('Key'))\n return file_list",
"def api_files():\n files = FileWrapper.get_files(g.user.id)\n return jsonify([f.serialize() for f in files])",
"def fusion_api_get_storage_volumes(self, uri=None, param='', api=None, headers=None):\n return self.volume.get(uri=uri, api=api, headers=headers, param=param)",
"def search_files_folders(request,**kwargs):\n name = request.data['name']\n files_and_folders = FileSystem.objects.filter(name__icontains=name, status=\"CREATED\")\n\n response_list = []\n dictionary = {}\n for fsobject in files_and_folders :\n fs_object = convert_fsobject_to_fstypeobject(fsobject)\n if fs_object.path in dictionary.keys() :\n for fs_object in FileSystem.objects.filter(path=fs_object.path,status='CREATED').order_by('-creation_datetime')[:2] :\n try :\n response = requests.get(fs_object.location)\n if response.status_code == 200 :\n break\n except requests.ConnectionError :\n pass\n print\"final object location\", fs_object.location\n dictionary[fs_object.path]=fs_object\n else :\n dictionary[fs_object.path]=fs_object\n\n for fs_object in dictionary.values():\n fs_object.creation_datetime = convert_datetime_to_string(fs_object.creation_datetime)\n data = FileSystemSerializer(fs_object).data\n response_list.append(data)\n\n\n return Response(data=response_list,status=status.HTTP_200_OK)",
"def list_hnd(self, request, **kwargs):\n prefix = request.POST.get(\"prefix\", \"\")\n marker = request.POST.get(\"marker\", \"\")\n delimiter = request.POST.get(\"delimiter\", \"\")\n\n max_keys = int(request.POST.get(\"max_keys\", 1000))\n max_keys = max((1, max_keys)) # >= 1\n max_keys = min((1000, max_keys)) # <= 1000\n\n bucket = store.get_bucket()\n\n # prefix \"prefix\" with user dir\n eff_prefix = store.prefix_with_user_dir(request.user, prefix)\n\n # get list iterator from s3\n file_iter = bucket.list(prefix=eff_prefix, delimiter=delimiter,\n marker=marker, headers=None,\n encoding_type=None)\n\n # convert to list, try to get +1 item to be able\n # to determine if the results are truncated\n files = [key.key.split(\"/\", 1)[1]\n for key in itertools.islice(file_iter, 0, max_keys+1)]\n\n # if max_keys is less then there are more results\n # -> truncated = True\n truncated = len(files) > max_keys\n if truncated:\n # return 1 item less\n files = files[:-1]\n\n return self.create_response(request, {\n \"files\": files,\n \"truncated\": truncated\n })",
"async def list(request):\n dict_answer = {'models': [item[1]+' '+item[0]+str(item[2:]) for item in models_db],\n 'datasets': [conv_time(d.stat().st_atime)+' '+str(d.name) for d in Path('data/datasets/').glob('*')],\n }\n return web.json_response(dict_answer)",
"def get_objects(si, args):\n # Get datacenter object.\n datacenter_list = si.content.rootFolder.childEntity\n \"\"\"\n if args.datacenter_name:\n datacenter_obj = get_obj_in_list(args.datacenter_name, datacenter_list)\n else:\n \"\"\"\n datacenter_obj = datacenter_list[0]\n\n # Get datastore object.\n datastore_list = datacenter_obj.datastoreFolder.childEntity\n \"\"\"if args.datastore_name:\n datastore_obj = get_obj_in_list(args.datastore_name, datastore_list)\n elif len(datastore_list) > 0:\"\"\"\n datastore_obj = datastore_list[0]\n #else:\n # print \"No datastores found in DC (%s).\" % datacenter_obj.name\n\n # Get cluster object.\n cluster_list = datacenter_obj.hostFolder.childEntity\n \"\"\"if args.cluster_name:\n cluster_obj = get_obj_in_list(args.cluster_name, cluster_list)\n elif len(cluster_list) > 0:\"\"\"\n cluster_obj = cluster_list[0]\n #else:\n # print \"No clusters found in DC (%s).\" % datacenter_obj.name\n\n # Generate resource pool.\n resource_pool_obj = cluster_obj.resourcePool\n\n return {\"datacenter\": datacenter_obj,\n \"datastore\": datastore_obj\n ,\"resource pool\": resource_pool_obj}",
"def getDiskDrives(self, df: str = None, ts: str = None, cursor: str = None, pageSize: int = None):\n params = {\n 'df': df,\n 'ts': ts,\n 'cursor': cursor,\n 'pageSize': pageSize\n }\n return self.api_get_request(f'{self.NINJA_API_QUERIES_DISKS}', params=params)",
"def get_list(self ):\n headers = { 'Authorization' : self.client.authorization_header }\n response = requests.get(\n self.client.url + '/media', \n headers = headers\n )\n\n return json.loads(response.text)",
"def list_disks(self, instance_name):\n return ['A_DISK']",
"def getObjectFiles(self, obj):\n filelist = list()\n\n fileurl = 'objects/{0}/files'.format(obj.id)\n\n fl = self.iterateAllPaginated(fileurl)\n\n for f in fl:\n res = self.getFile(f['selfUrl'])\n filelist.append(res)\n return filelist",
"def boxSearch(client):\n\tfiles = []\n\titems_iter = client.folder(folder_id=0).get_items(limit=100, offset=0)\n\tfor x in items_iter:\n\t\tfiles.append(x)\n\treturn files",
"def ls(self):\n files = self.drive.files().list().execute().get(\"files\", [])\n for f in files:\n print(f[\"name\"], f[\"mimeType\"])",
"def test_select_by_filesystem(self):\n self.create_simple_filesystem(synthetic_host(\"myserver\"))\n\n response = self.api_client.get(\"/api/target/\", data={\"filesystem_id\": self.fs.id})\n self.assertHttpOK(response)\n content = json.loads(response.content)\n self.assertEqual(3, len(content[\"objects\"]))\n\n response = self.api_client.get(\"/api/target/\", data={\"filesystem_id\": -1000})\n self.assertHttpOK(response)\n content = json.loads(response.content)\n self.assertEqual(0, len(content[\"objects\"]))",
"def get_all(self, start_at, limit, order=None):\n result = []\n objects = []\n if limit == 0:\n objects = self.items[start_at:]\n else:\n objects = self.items[start_at:(start_at + limit)]\n for item in objects:\n result.append(FileDict(item))\n return result",
"def get_overage_disks_json(disk_list):\n\t\tpass"
] | [
"0.7016373",
"0.69284606",
"0.68586487",
"0.66986144",
"0.6593451",
"0.6314186",
"0.6287725",
"0.619246",
"0.61885196",
"0.61516696",
"0.6150911",
"0.61330783",
"0.6132069",
"0.61276704",
"0.6107391",
"0.6061154",
"0.60491234",
"0.6045728",
"0.60329306",
"0.6027059",
"0.6023447",
"0.59995544",
"0.59899294",
"0.598652",
"0.5981486",
"0.5973287",
"0.5970636",
"0.59585655",
"0.5947925",
"0.5946715"
] | 0.71823597 | 0 |
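Listing the flat disk collection, as in the row above, is usually followed by filtering on a single attribute such as 'id' or 'name' (the pattern the other helpers in these rows rely on). The sketch below illustrates that filtering step with a hypothetical Disk dataclass; it is not part of the dataset.

```python
# Hypothetical sketch: once a flat disk collection is fetched, callers typically
# select a single disk by one attribute ('id' or 'name').
from dataclasses import dataclass


@dataclass
class Disk:
    id: str
    name: str


def find_disk(disks, value, attribute="id"):
    """Return the first disk whose `attribute` equals `value`, or None."""
    return next((d for d in disks if getattr(d, attribute) == value), None)


if __name__ == "__main__":
    all_disks = [Disk("d1", "os_disk"), Disk("d2", "data_disk")]
    print(find_disk(all_disks, "data_disk", attribute="name"))  # Disk(id='d2', name='data_disk')
    print(find_disk(all_disks, "missing", attribute="id"))      # None
```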
Creates a disk attachment object | def prepare_disk_attachment_object(disk_id=None, **kwargs):
disk = kwargs.pop("disk", None)
disk_obj = disk if disk else prepare_ds_object("Disk", id=disk_id)
return prepare_ds_object("DiskAttachment", disk=disk_obj, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_attachment1():\n \n attachment = Attachment()\n attachment.file_content = (\"TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNl\"\n \"Y3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gQ3JhcyBwdW12\")\n attachment.file_type = \"application/pdf\"\n attachment.file_name = \"balance_001.pdf\"\n attachment.disposition = \"attachment\"\n attachment.content_id = \"Balance Sheet\"\n return attachment",
"def _createAttachment(self, filePath):\r\n\t\t#encode attachment\r\n\t\twith open(filePath, 'rb') as f:\r\n\t\t\tdata = f.read()\r\n\t\tencoded_file = base64.b64encode(data).decode()\r\n\r\n\t\tname = self._getFileName(filePath)\r\n\t\t#create attachment object\r\n\t\tattachedFile = Attachment()\r\n\t\tattachedFile.file_content = FileContent(encoded_file)\r\n\t\tattachedFile.file_name = FileName(name)\r\n\t\tattachedFile.file_type = FileType(mimetypes.guess_type(name)[0])\r\n\t\tattachedFile.disposition = Disposition('attachment')\r\n\r\n\t\treturn attachedFile",
"def _create_attachment(self, filename, content, mimetype=None):\n if mimetype is None:\n mimetype, _ = mimetypes.guess_type(filename)\n if mimetype is None:\n mimetype = DEFAULT_ATTACHMENT_MIME_TYPE\n basetype, subtype = mimetype.split('/', 1)\n if basetype == 'text':\n encoding = self.encoding or settings.DEFAULT_CHARSET\n attachment = SafeMIMEText(smart_str(content,\n settings.DEFAULT_CHARSET), subtype, encoding)\n else:\n # Encode non-text attachments with base64.\n attachment = MIMEBase(basetype, subtype)\n attachment.set_payload(content)\n encoders.encode_base64(attachment)\n if filename:\n try:\n filename = filename.encode('ascii')\n except UnicodeEncodeError:\n filename = Header(filename, 'utf-8').encode()\n attachment.add_header('Content-Disposition', 'attachment',\n filename=filename)\n return attachment",
"def create(self, disk):\n logging.info('Adding type %d partition to disk image: %s' % (self.type, disk.filename))\n run_cmd('parted', '--script', '--', disk.filename, 'mkpart', 'primary', self.parted_fstype(), self.begin, self.end)",
"def createDisk(self , name):\n return",
"def create_model(session,\n content_id: int = 1,\n attachment_info: dict = Models.FILE_1):\n attachment = AttachmentModel(id=attachment_info['id'],\n file_name=attachment_info['file_name'],\n file_bytes=base64.b64decode(attachment_info['file_bytes']),\n attach_order=attachment_info['attach_order'],\n content_id=content_id)\n session.add(attachment)\n session.commit()\n attachment = session.merge(attachment)\n\n return attachment",
"def create_disk(auth_parms, prod_offer, disk_size, disk_name, vdc_uuid):\n\n # get product offer uuid for the disk in question\n prod_offer_uuid = get_prod_offer_uuid(auth_parms, prod_offer)\n\n disk_job = rest_create_disk(auth_parms, vdc_uuid, prod_offer_uuid, disk_name, disk_size)\n\n disk_uuid = disk_job['itemUUID']\n print(\"New disk UUID=\" + disk_uuid)\n\n # Check the job completes\n status = wait_for_job(auth_parms, disk_job['resourceUUID'], \"SUCCESSFUL\", 90)\n if (status != 0):\n raise Exception(\"Failed to add create disk (uuid=\" + disk_uuid + \")\")\n\n return disk_uuid",
"def get_disk_obj_from_disk_attachment(disk_attachment):\n return get_disk_obj(disk_attachment.get_id(), 'id')",
"def __init__(self, attachment=None, *, parent=None, **kwargs):\n kwargs.setdefault('protocol', getattr(parent, 'protocol', None))\n kwargs.setdefault('main_resource',\n getattr(parent, 'main_resource', None))\n\n super().__init__(**kwargs)\n self.name = None\n self.attachment_type = 'file'\n self.attachment_id = None\n self.content_id = None\n self.is_inline = False\n self.attachment = None\n self.content = None\n self.on_disk = False\n self.on_cloud = kwargs.get('on_cloud', False)\n self.size = None\n\n if attachment:\n if isinstance(attachment, dict):\n if self._cloud_data_key in attachment:\n # data from the cloud\n attachment = attachment.get(self._cloud_data_key)\n self.attachment_id = attachment.get(self._cc('id'), None)\n self.content_id = attachment.get(self._cc('contentId'), None)\n self.is_inline = attachment.get(self._cc('IsInline'), False)\n self.name = attachment.get(self._cc('name'), None)\n self.content = attachment.get(self._cc('contentBytes'),\n None)\n self.attachment_type = 'item' if 'item' in attachment.get(\n '@odata.type', '').lower() else 'file'\n self.on_disk = False\n self.size = attachment.get(self._cc('size'), None)\n else:\n file_path = attachment.get('path', attachment.get('name'))\n if file_path is None:\n raise ValueError('Must provide a valid \"path\" or '\n '\"name\" for the attachment')\n self.content = attachment.get('content')\n self.on_disk = attachment.get('on_disk')\n self.attachment_id = attachment.get('attachment_id')\n self.attachment = Path(file_path) if self.on_disk else None\n self.name = (self.attachment.name if self.on_disk\n else attachment.get('name'))\n self.size = self.attachment.stat().st_size if self.attachment else None\n\n elif isinstance(attachment, str):\n self.attachment = Path(attachment)\n self.name = self.attachment.name\n elif isinstance(attachment, Path):\n self.attachment = attachment\n self.name = self.attachment.name\n elif isinstance(attachment, (tuple, list)):\n # files with custom names or Inmemory objects\n file_obj, custom_name = attachment\n if isinstance(file_obj, BytesIO):\n # in memory objects\n self.size = file_obj.getbuffer().nbytes\n self.content = base64.b64encode(file_obj.getvalue()).decode('utf-8')\n else:\n self.attachment = Path(file_obj)\n self.name = custom_name\n\n elif isinstance(attachment, AttachableMixin):\n # Object that can be attached (Message for example)\n self.attachment_type = 'item'\n self.attachment = attachment\n self.name = attachment.attachment_name\n self.content = attachment.to_api_data()\n self.content['@odata.type'] = attachment.attachment_type\n\n if self.content is None and self.attachment and self.attachment.exists():\n with self.attachment.open('rb') as file:\n self.content = base64.b64encode(file.read()).decode('utf-8')\n self.on_disk = True\n self.size = self.attachment.stat().st_size",
"def create_disk(self, disk):\n spec = {\n 'new_vmdk': {\n # Convert from mebibytes to bytes because VMDK is specified in bytes\n 'capacity': 1024\n * 1024\n * disk.size,\n }\n }\n\n try:\n backend_id = self.client.create_disk(disk.vm.backend_id, spec)\n except VMwareError as e:\n raise VMwareBackendError(e)\n else:\n disk.backend_id = backend_id\n disk.save(update_fields=['backend_id'])\n signals.vm_updated.send(self.__class__, vm=disk.vm)\n return disk",
"def get_disk_attachment(name, disk, attr='id', object_type='vm'):\n disk_list = get_disk_attachments(name, object_type=object_type)\n disk_id = None\n if attr == 'name' or attr == 'alias':\n for disk_obj in disk_list:\n disk_obj_alias = get_disk_obj(\n disk_obj.get_id(), attribute='id'\n ).get_alias()\n if disk_obj_alias == disk:\n disk_id = disk_obj.get_id()\n break\n elif attr == 'id':\n disk_id = disk\n\n for disk in disk_list:\n if disk.get_id() == disk_id:\n return disk\n return None",
"def new_attachment(self, context, payload):\n\n message_id = payload['id']\n parts = payload['payload']['parts']\n\n for part in parts:\n if part['mimeType'] == \"application/octet-stream\" and part['filename']:\n att_id = part['body']['attachmentId']\n\n data = {\n \"message_id\": message_id,\n \"attachment_id\": att_id\n }\n\n return GmailApi.attachment(context, data)",
"def _AttachDisk(self, idx, params, _):\n uuid = params.get(\"uuid\", None)\n name = params.get(constants.IDISK_NAME, None)\n\n disk = self.GenericGetDiskInfo(uuid, name)\n\n # Rename disk before attaching (if disk is filebased)\n if disk.dev_type in constants.DTS_INSTANCE_DEPENDENT_PATH:\n # Add disk size/mode, else GenerateDiskTemplate will not work.\n params[constants.IDISK_SIZE] = disk.size\n params[constants.IDISK_MODE] = str(disk.mode)\n dummy_disk = self._GenerateDiskTemplateWrapper(idx, disk.dev_type, params)\n new_logical_id = dummy_disk.logical_id\n result = self.rpc.call_blockdev_rename(self.instance.primary_node,\n [(disk, new_logical_id)])\n result.Raise(\"Failed before attach\")\n self.cfg.SetDiskLogicalID(disk.uuid, new_logical_id)\n disk.logical_id = new_logical_id\n\n # Attach disk to instance\n self.cfg.AttachInstanceDisk(self.instance.uuid, disk.uuid, idx)\n\n # re-read the instance from the configuration\n self.instance = self.cfg.GetInstanceInfo(self.instance.uuid)\n\n changes = [\n (\"disk/%d\" % idx,\n \"attach:size=%s,mode=%s\" % (disk.size, disk.mode)),\n ]\n\n disks_ok, _, payloads = AssembleInstanceDisks(self, self.instance,\n disks=[disk])\n if not disks_ok:\n changes.append((\"disk/%d\" % idx, \"assemble:failed\"))\n return disk, changes\n\n if self.op.hotplug:\n _, link_name, uri = payloads[0]\n msg = self._HotplugDevice(constants.HOTPLUG_ACTION_ADD,\n constants.HOTPLUG_TARGET_DISK,\n disk, (link_name, uri), idx)\n changes.append((\"disk/%d\" % idx, msg))\n\n return (disk, changes)",
"def attachDisk(\n positive, alias, vm_name, active=True, read_only=False, disk_id=None,\n interface='virtio', bootable=None,\n):\n if disk_id:\n name = disk_id\n attribute = 'id'\n else:\n name = alias\n attribute = 'name'\n disk_object = get_disk_obj(name, attribute)\n # This is only needed because for legacy reason we also want to modify\n # the read_only property when we attach a disk\n # Also for attaching a disk the active parameter is pass inside the disk\n # object\n updated_disk = _prepareDiskObject(\n id=disk_object.get_id(), read_only=read_only\n )\n vm_disks = getObjDisks(vm_name)\n logger.info(\"Attaching disk %s to vm %s\", alias, vm_name)\n disk_attachment = prepare_disk_attachment_object(\n updated_disk.get_id(), interface=interface, bootable=bootable,\n disk=updated_disk, active=active\n )\n return DISK_ATTACHMENTS_API.create(\n disk_attachment, positive, collection=vm_disks\n )[1]",
"def _prepareDiskObject(**kwargs):\n storage_domain_name = kwargs.pop('storagedomain', None)\n\n # Tuple (lun_address, lun_target, lun_id, lun_port)\n lun = (kwargs.pop('lun_address', None), kwargs.pop('lun_target', None),\n kwargs.pop('lun_id', None), kwargs.pop('lun_port', 3260))\n # Tuple (username, password)\n lun_creds = (kwargs.pop('lun_username', None),\n kwargs.pop('lun_password', None))\n type_ = kwargs.pop('type_', None)\n\n storage_connection = kwargs.pop('storage_connection', None)\n\n if lun != (None, None, None, 3260) and storage_connection:\n logger.error(\n \"You cannot set storage connection id and LUN params in one call!\")\n return None\n kwargs.pop('active', None)\n\n disk = kwargs.pop('update', None)\n if disk is None:\n disk = data_st.Disk(**kwargs)\n\n if storage_connection is not None:\n storage = data_st.HostStorage()\n storage.id = storage_connection\n disk.set_lun_storage(storage)\n\n if storage_domain_name is not None:\n storage_domain = STORAGE_DOMAIN_API.find(storage_domain_name,\n NAME_ATTR)\n storage_domains = data_st.StorageDomains()\n storage_domains.add_storage_domain(storage_domain)\n disk.storage_domains = storage_domains\n\n # quota\n quota_id = kwargs.pop('quota', None)\n if quota_id == '':\n disk.set_quota(data_st.Quota())\n elif quota_id:\n disk.set_quota(data_st.Quota(id=quota_id))\n\n if lun != (None, None, None, 3260):\n direct_lun = data_st.LogicalUnit(address=lun[0], target=lun[1],\n id=lun[2], port=lun[3])\n if lun_creds != (None, None):\n direct_lun.set_username(lun_creds[0])\n direct_lun.set_password(lun_creds[1])\n\n logical_units = data_st.LogicalUnits(logical_unit=[direct_lun])\n disk.set_lun_storage(\n data_st.HostStorage(logical_units=logical_units, type_=type_)\n )\n\n # id\n disk_id = kwargs.pop('id', None)\n if disk_id:\n disk.set_id(disk_id)\n\n # read_only\n read_only = kwargs.pop('read_only', None)\n if read_only is not None:\n disk.set_read_only(read_only)\n\n # snapshot\n snapshot = kwargs.pop('snapshot', None)\n if snapshot:\n disk.set_snapshot(snapshot)\n\n # description\n description = kwargs.pop('description', None)\n if description is not None:\n disk.set_description(description)\n\n # qcow_version\n qcow_version = kwargs.pop('qcow_version', None)\n if qcow_version:\n disk.set_qcow_version(qcow_version)\n\n return disk",
"def test_attachment_create_no_connector(self):\n volume_params = {'status': 'available'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n aref = self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n self.assertEqual(fake.UUID2, aref.instance_uuid)\n self.assertIsNone(aref.attach_time)\n self.assertEqual('reserved', aref.attach_status)\n self.assertEqual('null', aref.attach_mode)\n self.assertEqual(vref.id, aref.volume_id)\n self.assertEqual({}, aref.connection_info)",
"def disk_create(context, values):\n return NotImplemented",
"def test_attachment_create_readonly_volume(self):\n volume_params = {'status': 'available'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n self.volume_api.update_readonly_flag(self.context, vref, True)\n aref = self.volume_api.attachment_create(self.context,\n vref,\n fake.UUID2)\n self.assertEqual(fake.UUID2, aref.instance_uuid)\n self.assertIsNone(aref.attach_time)\n self.assertEqual('reserved', aref.attach_status)\n self.assertEqual('ro', aref.attach_mode)\n self.assertEqual(vref.id, aref.volume_id)\n self.assertEqual({}, aref.connection_info)",
"def _write_attachment(self, root, context=None):\n fecha = time.strftime('%Y_%m_%d_%H%M%S')\n name = 'IVA_' + fecha + '.' + 'txt'\n self.env['ir.attachment'].create({\n 'name': name,\n 'datas': base64.encodestring(root),\n 'datas_fname': name,\n 'res_model': 'txt.iva',\n 'res_id': self.ids[0],\n })\n msg = _(\"File TXT %s generated.\") % (name)\n self.message_post(body=msg)",
"def create_disk_instance(device, disk_params):\n\n domain_name = device[\"name\"]\n disk_instance_path = \"\"\n\n if \"type\" in disk_params:\n if disk_params[\"type\"] == \"image\" and \"image_id\" in disk_params:\n logger.debug(\"Creating secondary/tertiary Disk information\")\n image_id = disk_params[\"image_id\"]\n disk_image = Image.objects.get(pk=image_id)\n disk_base_path = settings.MEDIA_ROOT + \"/\" + disk_image.filePath.url\n\n disk_instance_path = osUtils.get_instance_path_from_image(disk_base_path,\n domain_name + \"_secondary_image.img\"\n )\n\n if not osUtils.check_path(disk_instance_path):\n if not osUtils.create_thin_provision_instance(disk_base_path,\n domain_name + \"_secondary_image.img\"\n ):\n raise Exception(\"Could not create image instance for image: \" + disk_base_path)\n\n elif disk_params[\"type\"] == \"blank\":\n disk_instance_path = settings.MEDIA_ROOT \\\n + \"/user_images/instances/\" + domain_name + \"_secondary_blank.img\"\n\n disk_size = \"16G\"\n if \"size\" in disk_params:\n disk_size = disk_params[\"size\"]\n\n if not osUtils.check_path(disk_instance_path):\n if not osUtils.create_blank_image(disk_instance_path, disk_size):\n raise Exception(\"Could not create image instance for image: \" + disk_instance_path)\n\n elif disk_params[\"type\"] == \"config_drive\":\n # let's check if config_drive is supported for this vm_type!\n # this is usually used for vMX in openstack, however, we can also use it here for KVM deployments\n disk_instance_path = ''\n if \"configDriveSupport\" in device and device[\"configDriveSupport\"] is True:\n\n logger.debug(\"Lets create a config-drive!\")\n\n # keep a dict of files with format: filename: filecontents\n files = dict()\n params = device[\"configDriveParams\"]\n if \"configDriveParamsFile\" in device and device[\"configDriveParamsFile\"]:\n logger.debug(\"Using inline config_drive format\")\n # behavior change 12-28-2016 - allow passing a list of templates and destinations\n # instead of defining the params directly on the device object\n # if the configDriveParams is a dict, then this is an older topology, leave this code here\n # to still support them - otherwise fall through to the isinstance check for list type for\n # newer style configuration\n if isinstance(params, dict):\n name = device[\"configDriveParamsFile\"]\n file_data = \"\"\n # config drive params are usually a dict - to make json serialization easier\n # for our purposes here, let's just make a file with a single key: value per line\n # note, we can add a serialization format to the vm_type.js if needed here\n # only currently used for /boot/loader.conf in vmx and riot\n for k in params:\n file_data += '%s=\"%s\"\\n' % (k, params[k])\n\n files[name] = file_data\n\n # junos customization\n # let's also inject a default config here as well if possible!\n if \"junos\" in device[\"type\"]:\n logger.debug(\"Creating Junos configuration template\")\n junos_config = osUtils.get_junos_default_config_template(device[\"name\"],\n device[\"label\"],\n device[\"password\"],\n device[\"ip\"],\n device[\"managementInterface\"])\n\n if junos_config is not None:\n files[\"/juniper.conf\"] = junos_config\n\n # check for new (12-28-2016) style config drive params definition\n if isinstance(params, list):\n logger.debug(\"params is a list\")\n for p in params:\n if \"template\" in p and \"destination\" in p:\n file_data = None\n file_data = osUtils.compile_config_drive_params_template(\n p[\"template\"],\n device[\"name\"],\n device[\"label\"],\n device[\"password\"],\n 
device[\"ip\"],\n device[\"managementInterface\"]\n )\n if file_data is not None:\n files[p[\"destination\"]] = file_data\n\n disk_instance_path = osUtils.create_config_drive(device[\"name\"], files)\n if disk_instance_path is None:\n disk_instance_path = ''\n\n logger.debug(\"Using %s\" % disk_instance_path)\n return disk_instance_path",
"def make_VolumeAttachmentV2(lun=None,\n lunType='Auto',\n permanent=False,\n storagePaths=[],\n volumeName=None,\n volumeProvisionType='Thin',\n volumeProvisionedCapacityBytes=None,\n volumeShareable=False,\n volumeStoragePoolUri=None,\n volumeStorageSystemUri=None,\n volumeUri=None):\n if volumeProvisionedCapacityBytes:\n volAttach = {'id': None,\n 'lunType': lunType,\n 'permanent': permanent,\n 'volumeName': volumeName,\n 'volumeUri': None,\n 'volumeProvisionType': volumeProvisionType,\n 'volumeProvisionedCapacityBytes': volumeProvisionedCapacityBytes,\n 'volumeShareable': volumeShareable,\n 'volumeStoragePoolUri': volumeStoragePoolUri,\n 'volumeStorageSystemUri': None,\n 'storagePaths': storagePaths,\n }\n else:\n volAttach = {'id': None,\n 'lunType': lunType,\n 'volumeUri': volumeUri,\n 'volumeStoragePoolUri': volumeStoragePoolUri,\n 'volumeStorageSystemUri': volumeStorageSystemUri,\n 'storagePaths': storagePaths,\n }\n\n if lunType == 'Manual':\n volAttach['lun'] = lun\n\n return volAttach",
"def perform_create(self, serializer):\n\n attachment = serializer.save()\n attachment.user = self.request.user\n attachment.save()",
"def create_record_w_file(client, record, headers):\n # Create draft\n record[\"files\"] = {\"enabled\": True}\n response = client.post(\"/records\", json=record, headers=headers)\n assert response.status_code == 201\n recid = response.json[\"id\"]\n\n # Attach a file to it\n response = client.post(\n f\"/records/{recid}/draft/files\", headers=headers, json=[{\"key\": \"test.pdf\"}]\n )\n assert response.status_code == 201\n response = client.put(\n f\"/records/{recid}/draft/files/test.pdf/content\",\n headers={\n \"content-type\": \"application/octet-stream\",\n \"accept\": \"application/json\",\n },\n data=BytesIO(b\"testfile\"),\n )\n assert response.status_code == 200\n response = client.post(\n f\"/records/{recid}/draft/files/test.pdf/commit\", headers=headers\n )\n assert response.status_code == 200\n\n # Publish it\n response = client.post(f\"/records/{recid}/draft/actions/publish\", headers=headers)\n assert response.status_code == 202\n\n return recid",
"def _generate_attachment(self):\n Attachment = self.env['ir.attachment']\n ReportXml = self.env['ir.actions.report.xml']\n Report = self.env['report']\n pages = {}\n for current_order in self:\n report = ReportXml.search([('model', '=', current_order.res_model)], limit=1)\n if current_order.attachment_id: # compute page number\n # avoid to recompute the number of page each time for the attachment\n nbr_pages = pages.get(current_order.attachment_id.id)\n if not nbr_pages:\n nbr_pages = current_order._count_pages_pdf(current_order.attachment_id.datas.decode('base64'))\n pages[current_order.attachment_id.id] = nbr_pages\n current_order.write({\n 'nbr_pages': nbr_pages\n })\n elif not current_order.attachment_id and current_order.res_model and current_order.res_id and report: # check report\n # browse object and find its pdf (binary content)\n object_to_print = self.env[current_order.res_model].browse(current_order.res_id)\n bin_pdf = Report.get_pdf(object_to_print, report.report_name)\n\n # compute the name of the new attachment\n filename = False\n if report.attachment:\n filename = safe_eval(report.attachment, {'object': object_to_print, 'time': time})\n if not filename:\n filename = '%s-%s' % (current_order.res_model.replace(\".\", \"_\"), current_order.res_id)\n\n # create the new ir_attachment\n attachment_value = {\n 'name': filename,\n 'res_name': filename,\n 'res_model': current_order.res_model,\n 'res_id': current_order.res_id,\n 'datas': base64.b64encode(bin_pdf),\n 'datas_fname': filename+'.pdf',\n }\n new_attachment = Attachment.create(attachment_value)\n\n # add the new attachment to the print order\n current_order.write({\n 'nbr_pages': self._count_pages_pdf(bin_pdf),\n 'attachment_id': new_attachment.id\n })\n elif not current_order.attachment_id and current_order.res_model and current_order.res_id and not report: # error : no ir.actions.report.xml found for res_model\n current_order.write({\n 'state': 'error',\n 'error_message': _('The document you want to print and send is not printable. There is no report action (ir.actions.report.xml) for the model %s.') % (current_order.res_model,)\n })\n else: # error : not attachament can be generate, no attach_id or no res_model/res_id\n current_order.write({\n 'state': 'error',\n 'error_message': _('The document has no associated PDF : you have to give select an Attachment file, or set up the Object ID and Model Name fields.')\n })",
"def test_attachment_create_creating_volume(self):\n volume_params = {'status': 'creating'}\n\n vref = tests_utils.create_volume(self.context, **volume_params)\n self.assertRaises(exception.InvalidVolume,\n self.volume_api.attachment_create,\n self.context,\n vref,\n fake.UUID1)",
"def _create_disk_from_image(self, context, instance, image_meta,\n image_type=DiskType.BOOT):\n pass",
"def attachment(self, attachment_id):\r\n return Attachment(self, attachment_id)",
"def create(self, filesystem=None):\n raise NotImplementedError()",
"def Attachment(self, table, sys_id=None):\n return Attachment(self, table, sys_id)",
"def attach_file(cls, message, file):\n\n if file.size:\n filename = smart_text(file.name)\n logger.debug(filename)\n try:\n mime_type = file.content_type\n except:\n mime_type = mimetypes.guess_type(filename, strict=False)[0]\n if not(mime_type):\n mime_type = 'application/octet-stream'\n\n att = VinceAttachment(\n file=file,\n filename=os.path.basename(filename),\n mime_type=mime_type,\n size=file.size,\n )\n att.save()\n\n na = cls.objects.create(message=message, file=att)\n print(na.file.file.name)\n s3 = boto3.client('s3', region_name=settings.AWS_REGION)\n # check tag will be acceptable?\n nopass = re.findall(r'[^-+= \\.:/@A-Za-z0-9_]', filename)\n if nopass:\n #this tag contains unacceptable chars, so do not add tag\n rd = s3.put_object_tagging(Bucket=settings.PRIVATE_BUCKET_NAME,\n Key='vince_attachments/'+ na.file.file.name,\n Tagging={'TagSet':[{'Key': 'Message', 'Value':str(message.id)}]})\n else:\n rd = s3.put_object_tagging(Bucket=settings.PRIVATE_BUCKET_NAME,\n Key='vince_attachments/'+ na.file.file.name,\n Tagging={'TagSet':[{'Key': 'Message', 'Value':str(message.id)},\n {'Key':'Filename', 'Value':filename}]})"
] | [
"0.6559428",
"0.6483205",
"0.6458923",
"0.6397481",
"0.6285788",
"0.6222577",
"0.6217149",
"0.6192481",
"0.61305714",
"0.59841096",
"0.5957176",
"0.5932291",
"0.59042203",
"0.5887022",
"0.58844835",
"0.5867121",
"0.5867092",
"0.580685",
"0.58006567",
"0.5778685",
"0.5754964",
"0.57391876",
"0.5734245",
"0.5733566",
"0.5726292",
"0.5623682",
"0.5618044",
"0.55896467",
"0.557044",
"0.55677724"
] | 0.7236901 | 0 |
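The attachment helper in the row above builds one API object (a DiskAttachment) around another (a Disk), either from an existing disk object or from a bare id. A minimal sketch of that nesting pattern follows, assuming simple hypothetical dataclasses in place of the SDK's generated types; the names here are illustrative only.

```python
# Minimal sketch of wrapping a Disk (or an id-only Disk) in a DiskAttachment.
from dataclasses import dataclass
from typing import Optional


@dataclass
class Disk:
    id: str


@dataclass
class DiskAttachment:
    disk: Disk
    interface: str = "virtio"
    bootable: bool = False
    active: bool = True


def prepare_attachment(disk_id: Optional[str] = None,
                       disk: Optional[Disk] = None,
                       **kwargs) -> DiskAttachment:
    """Wrap an existing Disk (or one built from its id) in a DiskAttachment."""
    return DiskAttachment(disk=disk if disk else Disk(id=disk_id), **kwargs)


if __name__ == "__main__":
    print(prepare_attachment(disk_id="d1", interface="ide", bootable=True))
```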
Samples a disk and waits until disk is found in the specific storage domain or until timeout is reached | def wait_for_disk_storage_domain(
disk, storage_domain, key='id', timeout=600, interval=5
):
disk_name = get_disk_obj(disk, key).get_name() if key == 'id' else disk
for sample in TimeoutingSampler(
timeout, interval, get_disk_storage_domain_name, disk_name
):
if sample == storage_domain:
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_least_busy_host_gets_volume(self):\n volume1 = self.start_service('volume', host='host1')\n volume2 = self.start_service('volume', host='host2')\n volume_id1 = self._create_volume()\n volume1.create_volume(self.context, volume_id1)\n volume_id2 = self._create_volume()\n host = self.scheduler.driver.schedule_create_volume(self.context,\n volume_id2)\n self.assertEqual(host, 'host2')\n volume1.delete_volume(self.context, volume_id1)\n db.volume_destroy(self.context, volume_id2)\n volume1.kill()\n volume2.kill()",
"def test_least_busy_host_gets_volume_no_queue(self):\n volume1 = service.Service('host1',\n 'nova-volume',\n 'volume',\n FLAGS.volume_manager)\n volume1.start()\n volume2 = service.Service('host2',\n 'nova-volume',\n 'volume',\n FLAGS.volume_manager)\n volume2.start()\n volume_id1 = self._create_volume()\n volume1.create_volume(self.context, volume_id1)\n volume_id2 = self._create_volume()\n host = self.scheduler.driver.schedule_create_volume(self.context,\n volume_id2)\n self.assertEqual(host, 'host2')\n volume1.delete_volume(self.context, volume_id1)\n db.volume_destroy(self.context, volume_id2)",
"def start_creating_disks_for_test(\n shared=False, sd_name=None, disk_size=config.DISK_SIZE,\n interfaces=INTERFACES\n):\n disks = []\n storage_domain_object = ll_sd.get_storage_domain_obj(sd_name)\n sd_type = storage_domain_object.get_storage().get_type()\n logger.info(\"Creating all disks required for test\")\n disk_permutations = ll_disks.get_all_disk_permutation(\n block=sd_type in config.BLOCK_TYPES, shared=shared,\n interfaces=interfaces\n )\n # Provide a warning in the logs when the total number of disk\n # permutations is 0\n if len(disk_permutations) == 0:\n logger.warn(\"The number of disk permutations is 0\")\n\n def add_disk(permutation):\n alias, interface = add_new_disk(\n sd_name=sd_name, permutation=permutation, shared=shared,\n sd_type=sd_type, disk_size=disk_size\n )\n disk = {\n 'disk_name': alias,\n 'disk_interface': interface\n }\n disks.append(disk)\n\n with ThreadPoolExecutor(max_workers=len(disk_permutations)) as executor:\n for disk in disk_permutations:\n executor.submit(add_disk, disk)\n return disks",
"def disk():\n run(env.disk_usage_command % env)",
"def get_disk_rw(sampling_duration):\n \n #get te list of devices\n with open('/proc/partitions') as f:\n devices = [re.search('\\s([^\\s]+)$', line).group(1).strip() for line in re.findall('^\\s*[0-9]+\\s+[1-9]+.*$', f.read(), flags = re.MULTILINE)]\n \n with open('/proc/diskstats') as f1:\n with open('/proc/diskstats') as f2:\n content1 = f1.read() #first collection\n yield {} #yield so that caller can put delay before sampling again\n content2 = f2.read() #second collection\n \n #initialize the dict with interfaces and values\n data = dict(zip(devices, [dict(zip(['reads', 'writes'], [0, 0])) for device in devices]))\n\n for line in content1.splitlines(): #read through first collection\n for device in [device_x for device_x in devices if '%s ' % device_x in line]:\n fields = line.strip().split('%s ' % device)[1].split()\n data[device]['reads'] = int(fields[0])\n data[device]['writes'] = int(fields[4])\n break\n \n for line in content2.splitlines(): #read through second collection\n for device in [device_x for device_x in devices if '%s ' % device_x in line]:\n fields = line.strip().split('%s ' % device)[1].split()\n data[device]['reads'] = (int(fields[0]) - data[device]['reads']) / float(sampling_duration)\n data[device]['writes'] = (int(fields[4]) - data[device]['writes']) / float(sampling_duration)\n break \n \n yield data",
"def wait_until_mounted(fname):\n max_tries = 36\n wait_sec = 5\n num_tries = 0\n while 1:\n if os.path.exists(fname):\n break\n elif num_tries > max_tries:\n raise ValueError(\"Did not find {f} after {s} seconds.\".format(\n f=fname, s=max_tries*wait_sec))\n time.sleep(wait_sec)\n num_tries += 1",
"def wait_for_disk_to_become_available(device):\n pids = ['']\n stderr = ['']\n interval = CONF.disk_partitioner.check_device_interval\n max_retries = CONF.disk_partitioner.check_device_max_retries\n\n def _wait_for_disk():\n # A regex is likely overkill here, but variations in fuser\n # means we should likely use it.\n fuser_pids_re = re.compile(r'\\d+')\n\n # There are 'psmisc' and 'busybox' versions of the 'fuser' program. The\n # 'fuser' programs differ in how they output data to stderr. The\n # busybox version does not output the filename to stderr, while the\n # standard 'psmisc' version does output the filename to stderr. How\n # they output to stdout is almost identical in that only the PIDs are\n # output to stdout, with the 'psmisc' version adding a leading space\n # character to the list of PIDs.\n try:\n # NOTE(ifarkas): fuser returns a non-zero return code if none of\n # the specified files is accessed.\n # NOTE(TheJulia): fuser does not report LVM devices as in use\n # unless the LVM device-mapper device is the\n # device that is directly polled.\n # NOTE(TheJulia): The -m flag allows fuser to reveal data about\n # mounted filesystems, which should be considered\n # busy/locked. That being said, it is not used\n # because busybox fuser has a different behavior.\n # NOTE(TheJuia): fuser outputs a list of found PIDs to stdout.\n # All other text is returned via stderr, and the\n # output to a terminal is merged as a result.\n out, err = execute('fuser', device, check_exit_code=[0, 1],\n run_as_root=True)\n\n if not out and not err:\n return True\n\n stderr[0] = err\n # NOTE: findall() returns a list of matches, or an empty list if no\n # matches\n pids[0] = fuser_pids_re.findall(out)\n\n except processutils.ProcessExecutionError as exc:\n LOG.warning('Failed to check the device %(device)s with fuser:'\n ' %(err)s', {'device': device, 'err': exc})\n return False\n\n retry = tenacity.retry(\n retry=tenacity.retry_if_result(lambda r: not r),\n stop=tenacity.stop_after_attempt(max_retries),\n wait=tenacity.wait_fixed(interval),\n reraise=True)\n try:\n retry(_wait_for_disk)()\n except tenacity.RetryError:\n if pids[0]:\n raise exception.IronicException(\n _('Processes with the following PIDs are holding '\n 'device %(device)s: %(pids)s. '\n 'Timed out waiting for completion.')\n % {'device': device, 'pids': ', '.join(pids[0])})\n else:\n raise exception.IronicException(\n _('Fuser exited with \"%(fuser_err)s\" while checking '\n 'locks for device %(device)s. Timed out waiting for '\n 'completion.')\n % {'device': device, 'fuser_err': stderr[0]})",
"def wait_for_disks_status_ok(request, storage):\n self = request.node.cls\n\n def finalizer():\n assert ll_disks.wait_for_disks_status(self.disk_name), (\n \"Failed to wait for disk %s to be in status OK\" % self.disk_name\n )\n\n request.addfinalizer(finalizer)",
"def scan_disk(disk_url, scan_file_dir):\n\n # make the dir if it doesn't exist\n if not os.path.exists(scan_file_dir):\n os.makedirs(scan_file_dir)\n \n # open up the image\n img = pytsk3.Img_Info(url=disk_url)\n \n # get the volume info\n VOL_INFO = pytsk3.Volume_Info(img)\n\n\n # print out some info about the disk image\n logger.debug(\"--- Volume info ---\")\n logger.debug(\"Current: %d\" % VOL_INFO.current)\n logger.debug(\"VS Type: %d\" % VOL_INFO.info.vstype)\n logger.debug(\"Offset: %d\" % VOL_INFO.info.offset)\n logger.debug(\"Block Size: %d\" % VOL_INFO.info.block_size)\n logger.debug(\"Endian: %d\" % VOL_INFO.info.endian)\n logger.debug(\"Partition List: %s\" % VOL_INFO.info.part_list)\n logger.debug(\"Parition Count: %d\" % VOL_INFO.info.part_count)\n logger.debug(\"--- Volume info ---\")\n\n\n # list of filenames we have to cat together at the end\n files = []\n\n sector_size = VOL_INFO.info.block_size\n\n part_number = 1\n\n sparse_number = 0\n \n copied_front_data = False\n\n # loop over each volume\n for vol in VOL_INFO:\n \n logger.debug(\"--- Partition ---\")\n logger.debug(\"Start: %d\" % vol.start)\n logger.debug(\"Length: %d\" % vol.len)\n logger.debug(\"Description: %s\" % vol.desc)\n logger.debug(\"Address: %d\" % vol.addr)\n logger.debug(\"Flags: %d\" % vol.flags)\n\n # ignore partition table at beginning\n if vol.addr == 0:\n continue\n\n # copy the MBR and other stuff if this is the unpartitioned space between 0\n # and the first partition\n\n if not copied_front_data:\n fname = save_front_data(disk_url, scan_file_dir, vol.start, vol.len*sector_size)\n files.append(fname)\n copied_front_data = True\n \n continue\n\n type = vol.desc.split(\" \")[0]\n\n # if partition type is NTFS, do an NTFS clone on it\n if vol.desc == 'NTFS (0x07)':\n \n print \"* Scanning %s (%d)...\"%(disk_url,part_number)\n \n # Win 7 specific hack, so that we only have 1 sparse file.\n if vol.len == 204800 and vol.start == 2048:\n fname = save_NTFS_partition(part_number, disk_url, scan_file_dir, meta_only=False)\n else:\n fname = save_NTFS_partition(part_number, disk_url, scan_file_dir)\n files.append(fname)\n part_number += 1\n \n else:\n # create empty sparse file since we don't support other filesystems\n #fname = save_sparse(sparse_number, scan_file_dir, vol.len*sector_size)\n #files.append(fname)\n #sparse_number += 1\n pass\n \n \n \n # cat everything together\n logger.info(\"Cat everything together\")\n filenames = \" \".join(files)\n output_img = os.path.join(scan_file_dir, \"disk.img\")\n #cmd = \"cat \" + filenames + \" > \" + os.path.join(scan_file_dir, \"disk.img\")\n \n print \"* Scan complete, merging scans into one file (%s)...\"%output_img\n \n cmd = \"cat \" + filenames + \" | cp --sparse=always /proc/self/fd/0 \" + output_img\n \n logger.info(\"Running: %s\" % cmd)\n \n subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE).stdout.read()\n\n logging.info(\"Changing permissions on '%s'\"%output_img)\n\n os.chmod(output_img,0555)",
"def test_select_by_filesystem(self):\n self.create_simple_filesystem(synthetic_host(\"myserver\"))\n\n response = self.api_client.get(\"/api/target/\", data={\"filesystem_id\": self.fs.id})\n self.assertHttpOK(response)\n content = json.loads(response.content)\n self.assertEqual(3, len(content[\"objects\"]))\n\n response = self.api_client.get(\"/api/target/\", data={\"filesystem_id\": -1000})\n self.assertHttpOK(response)\n content = json.loads(response.content)\n self.assertEqual(0, len(content[\"objects\"]))",
"def waiting_result_file(input_name):\n\n time_limit = 0\n\n while True:\n if os.path.isfile(input_name) is True:\n break\n\n if time_limit > 10000:\n print \"There is no result file. It over the time limitation.\"\n exit()\n\n time.sleep(0.1)\n time_limit += 1",
"def bdev_wait_for_examine(client):\n return client.call('bdev_wait_for_examine')",
"def DriveWaitfor(expected,\n states,\n compare,\n timeout,\n mvip,\n username,\n password):\n log = GetLogger()\n op = sfdefaults.all_compare_ops[compare]\n\n log.info(\"Waiting for {}{} drives in state [{}]...\".format(op, expected, \",\".join(states)))\n start_time = time.time()\n last_count = 0\n while True:\n if time.time() - start_time > timeout:\n log.error(\"Timeout waiting for drives\")\n return False\n\n try:\n drives = SFCluster(mvip, username, password).ListDrives(driveState=states)\n except SolidFireError as e:\n log.error(\"Failed to list drives: {}\".format(e))\n return False\n\n if len(drives) != last_count:\n log.info(\" Found {} drives\".format(len(drives)))\n last_count = len(drives)\n\n expression = \"{}{}{}\".format(len(drives), op, expected)\n log.debug(\"Evaluating expression {}\".format(expression))\n result = eval(expression)\n\n if result:\n log.passed(\"Successfully waited for drives\")\n return True\n\n time.sleep(sfdefaults.TIME_SECOND * 20)",
"def _load_disk(self):\r\n pass",
"def _load_disk(self):",
"def _load_disk(self):",
"def test_mount_status_nas_share(self):\n pass",
"def waitForSound(self, time_limit = 7):\n\n\t\tself.sound.subscribe(\"sound_detection_client\")\n\n\t\t# give waiting a 7-second time limit\n\t\ttimeout = time.time() + 7\n\n\t\t# check for new sounds every 0.2 seconds\n\t\twhile (self.mem.getData(\"SoundDetected\")[0] != 1) and (time.time() < timeout):\n\t\t\ttime.sleep(0.2)\n\n\t\tself.sound.unsubscribe(\"sound_detection_client\")",
"def do_disk_action(\n action, disk_name=None, target_domain=None, disk_id=None, wait=True,\n timeout=COPY_MOVE_DISK_TIMEOUT, sleep=10, positive=True,\n new_disk_alias=None\n):\n sd = STORAGE_DOMAIN_API.find(target_domain)\n if disk_id:\n disk = DISKS_API.find(disk_id, attribute='id')\n elif disk_name:\n disk = DISKS_API.find(disk_name)\n else:\n raise ValueError(\"Either specify disk_id or disk_name\")\n\n DISKS_API.logger.info(\n \"Disk found. name: %s id: %s\", disk.get_alias(), disk.get_id()\n )\n updated_disk_alias = None\n if new_disk_alias and action == 'copy':\n logger.info(\n \"Disk with current alias %s will be copied into a disk with \"\n \"alias %s\", disk.get_alias(), new_disk_alias\n )\n updated_disk_alias = Disk(alias=new_disk_alias)\n\n if not DISKS_API.syncAction(\n disk, action, storage_domain=sd, positive=positive,\n disk=updated_disk_alias\n ):\n return False\n\n if wait and positive:\n # TODO: shouldn't it be possible to use a query here?\n for sample in TimeoutingSampler(\n timeout, sleep, getStorageDomainDisks, target_domain, False\n ):\n for target_disk in sample:\n if action == 'move':\n if disk.get_id() == target_disk.get_id() and (\n target_disk.get_status() == ENUMS['disk_state_ok']\n ):\n return True\n else:\n if target_disk.get_status() == ENUMS['disk_state_ok']:\n return True\n return False\n return True",
"def disk_get(context, disk_id):\n return NotImplemented",
"def get_disk(disk_name: Optional[str] = None,\n resource_group_name: Optional[str] = None,\n opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDiskResult:\n __args__ = dict()\n __args__['diskName'] = disk_name\n __args__['resourceGroupName'] = resource_group_name\n opts = pulumi.InvokeOptions.merge(_utilities.get_invoke_opts_defaults(), opts)\n __ret__ = pulumi.runtime.invoke('azure-native:compute/v20230402:getDisk', __args__, opts=opts, typ=GetDiskResult).value\n\n return AwaitableGetDiskResult(\n bursting_enabled=pulumi.get(__ret__, 'bursting_enabled'),\n bursting_enabled_time=pulumi.get(__ret__, 'bursting_enabled_time'),\n completion_percent=pulumi.get(__ret__, 'completion_percent'),\n creation_data=pulumi.get(__ret__, 'creation_data'),\n data_access_auth_mode=pulumi.get(__ret__, 'data_access_auth_mode'),\n disk_access_id=pulumi.get(__ret__, 'disk_access_id'),\n disk_iops_read_only=pulumi.get(__ret__, 'disk_iops_read_only'),\n disk_iops_read_write=pulumi.get(__ret__, 'disk_iops_read_write'),\n disk_m_bps_read_only=pulumi.get(__ret__, 'disk_m_bps_read_only'),\n disk_m_bps_read_write=pulumi.get(__ret__, 'disk_m_bps_read_write'),\n disk_size_bytes=pulumi.get(__ret__, 'disk_size_bytes'),\n disk_size_gb=pulumi.get(__ret__, 'disk_size_gb'),\n disk_state=pulumi.get(__ret__, 'disk_state'),\n encryption=pulumi.get(__ret__, 'encryption'),\n encryption_settings_collection=pulumi.get(__ret__, 'encryption_settings_collection'),\n extended_location=pulumi.get(__ret__, 'extended_location'),\n hyper_v_generation=pulumi.get(__ret__, 'hyper_v_generation'),\n id=pulumi.get(__ret__, 'id'),\n last_ownership_update_time=pulumi.get(__ret__, 'last_ownership_update_time'),\n location=pulumi.get(__ret__, 'location'),\n managed_by=pulumi.get(__ret__, 'managed_by'),\n managed_by_extended=pulumi.get(__ret__, 'managed_by_extended'),\n max_shares=pulumi.get(__ret__, 'max_shares'),\n name=pulumi.get(__ret__, 'name'),\n network_access_policy=pulumi.get(__ret__, 'network_access_policy'),\n optimized_for_frequent_attach=pulumi.get(__ret__, 'optimized_for_frequent_attach'),\n os_type=pulumi.get(__ret__, 'os_type'),\n property_updates_in_progress=pulumi.get(__ret__, 'property_updates_in_progress'),\n provisioning_state=pulumi.get(__ret__, 'provisioning_state'),\n public_network_access=pulumi.get(__ret__, 'public_network_access'),\n purchase_plan=pulumi.get(__ret__, 'purchase_plan'),\n security_profile=pulumi.get(__ret__, 'security_profile'),\n share_info=pulumi.get(__ret__, 'share_info'),\n sku=pulumi.get(__ret__, 'sku'),\n supported_capabilities=pulumi.get(__ret__, 'supported_capabilities'),\n supports_hibernation=pulumi.get(__ret__, 'supports_hibernation'),\n tags=pulumi.get(__ret__, 'tags'),\n tier=pulumi.get(__ret__, 'tier'),\n time_created=pulumi.get(__ret__, 'time_created'),\n type=pulumi.get(__ret__, 'type'),\n unique_id=pulumi.get(__ret__, 'unique_id'),\n zones=pulumi.get(__ret__, 'zones'))",
"def test_mount_status_nas_share_by_pool(self):\n pass",
"async def discover(self, timeout: int):",
"def prepare_disks_with_fs_for_vm(storage_domain, vm_name, executor=None):\n disk_ids = list()\n mount_points = list()\n disk_names = []\n disk_interfaces = []\n logger.info('Creating disks for test')\n disks = start_creating_disks_for_test(sd_name=storage_domain)\n for disk in disks:\n disk_names.append(disk['disk_name'])\n disk_interfaces.append(disk['disk_interface'])\n disk_ids.append(ll_disks.get_disk_obj(disk['disk_name']).get_id())\n\n assert ll_disks.wait_for_disks_status(\n disk_names, timeout=CREATION_DISKS_TIMEOUT\n ), \"Some disks are still locked\"\n logger.info(\"Attaching and activating disks %s\", disk_names)\n prepare_disks_for_vm(vm_name, disk_names, interfaces=disk_interfaces)\n\n if ll_vms.get_vm_state(vm_name) == config.VM_DOWN:\n ll_vms.startVm(\n True, vm_name, wait_for_status=config.VM_UP, wait_for_ip=True\n )\n if not executor:\n executor = get_vm_executor(vm_name)\n logger.info(\"Creating filesystems on disks %s\", disks)\n\n with ThreadPoolExecutor(max_workers=len(disk_names)) as thread_executor:\n for disk_alias in disk_names:\n result = thread_executor.submit(\n create_fs_on_disk, vm_name=vm_name, disk_alias=disk_alias,\n executor=executor\n )\n ecode = result.result()[0]\n mount_point = result.result()[1]\n if not ecode:\n logger.error(\n \"Cannot create filesysem on disk %s:\", disk_alias\n )\n mount_point = ''\n mount_points.append(mount_point)\n logger.info(\n \"Mount points for new disks: %s\", mount_points\n )\n return disk_ids, mount_points",
"def test_too_many_gigabytes(self):\n volume1 = self.start_service('volume', host='host1')\n volume2 = self.start_service('volume', host='host2')\n volume_ids1 = []\n volume_ids2 = []\n for index in xrange(FLAGS.max_gigabytes):\n volume_id = self._create_volume()\n volume1.create_volume(self.context, volume_id)\n volume_ids1.append(volume_id)\n volume_id = self._create_volume()\n volume2.create_volume(self.context, volume_id)\n volume_ids2.append(volume_id)\n volume_id = self._create_volume()\n self.assertRaises(driver.NoValidHost,\n self.scheduler.driver.schedule_create_volume,\n self.context,\n volume_id)\n for volume_id in volume_ids1:\n volume1.delete_volume(self.context, volume_id)\n for volume_id in volume_ids2:\n volume2.delete_volume(self.context, volume_id)\n volume1.kill()\n volume2.kill()",
"def do_disk_event(client, args):\n args.type = 'disk'\n do_event_show(client, args)",
"def scan_uploaded_samples() -> dict:\n while Analyzer.scanning:\n # Retrieve the scan results\n scan_results = Scanner.get_scans(ids=scan_id)\n try:\n if scan_results[\"body\"][\"resources\"][0][\"status\"] == \"done\":\n # Scan is complete, retrieve our results\n results = scan_results[\"body\"][\"resources\"][0][\"samples\"]\n # and break out of the loop\n Analyzer.scanning = False\n else:\n # Not done yet, sleep for a bit\n time.sleep(Config.scan_delay)\n except IndexError:\n # Results aren't populated yet, skip\n pass\n\n return results",
"async def test_sampling_size_1(hass: HomeAssistant) -> None:\n assert await async_setup_component(\n hass,\n \"sensor\",\n {\n \"sensor\": [\n {\n \"platform\": \"statistics\",\n \"name\": \"test\",\n \"entity_id\": \"sensor.test_monitored\",\n \"state_characteristic\": \"mean\",\n \"sampling_size\": 1,\n },\n ]\n },\n )\n await hass.async_block_till_done()\n\n for value in VALUES_NUMERIC:\n hass.states.async_set(\n \"sensor.test_monitored\",\n str(value),\n {ATTR_UNIT_OF_MEASUREMENT: UnitOfTemperature.CELSIUS},\n )\n await hass.async_block_till_done()\n\n state = hass.states.get(\"sensor.test\")\n new_mean = float(VALUES_NUMERIC[-1])\n assert state is not None\n assert state.state == str(new_mean)\n assert state.attributes.get(\"buffer_usage_ratio\") == round(1 / 1, 2)",
"def _wait(self):\n conn = None\n try:\n conn = libvirt.open(\"qemu:///system\")\n while True:\n time.sleep(10)\n try:\n state = conn.lookupByName(self.domain).info()[0]\n except (libvirt.libvirtError, TypeError, IndexError):\n break\n if state in [4, 5, 6]: # crashed or shutdown\n break\n finally:\n if conn is not None:\n conn.close()",
"def test_mount_status_nas_share_by_nas(self):\n pass"
] | [
"0.5733526",
"0.55582863",
"0.54887253",
"0.5453938",
"0.5273251",
"0.5197576",
"0.51590496",
"0.51475734",
"0.5147147",
"0.51260424",
"0.5120247",
"0.5102149",
"0.50740266",
"0.5062461",
"0.5061822",
"0.5061822",
"0.50525355",
"0.50219655",
"0.4995126",
"0.49904713",
"0.49873093",
"0.49757427",
"0.493524",
"0.4923128",
"0.49108943",
"0.4891616",
"0.48820186",
"0.48533458",
"0.48343274",
"0.482804"
] | 0.68130606 | 0 |
Return disk obj from disk attachment obj | def get_disk_obj_from_disk_attachment(disk_attachment):
return get_disk_obj(disk_attachment.get_id(), 'id') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_disk_attachment(name, disk, attr='id', object_type='vm'):\n disk_list = get_disk_attachments(name, object_type=object_type)\n disk_id = None\n if attr == 'name' or attr == 'alias':\n for disk_obj in disk_list:\n disk_obj_alias = get_disk_obj(\n disk_obj.get_id(), attribute='id'\n ).get_alias()\n if disk_obj_alias == disk:\n disk_id = disk_obj.get_id()\n break\n elif attr == 'id':\n disk_id = disk\n\n for disk in disk_list:\n if disk.get_id() == disk_id:\n return disk\n return None",
"def prepare_disk_attachment_object(disk_id=None, **kwargs):\n disk = kwargs.pop(\"disk\", None)\n disk_obj = disk if disk else prepare_ds_object(\"Disk\", id=disk_id)\n return prepare_ds_object(\"DiskAttachment\", disk=disk_obj, **kwargs)",
"def get_disk_obj(disk_alias, attribute='name'):\n return DISKS_API.find(disk_alias, attribute=attribute)",
"def get_disk_attachments(name, object_type='vm', get_href=False):\n api = get_api(object_type, \"%ss\" % object_type)\n obj = api.find(name)\n return DISK_ATTACHMENTS_API.getElemFromLink(obj, get_href=get_href)",
"def _getDisk(self):\n try:\n disk = self.parents[0]\n except IndexError:\n disk = None\n return disk",
"def disk(self):\n return self.__disk",
"def fileobject_to_dict(fo):\n if fo.allocated():\n # proc = subprocess.Popen(['./extract_strings', fo.inode()], stdout=subprocess.PIPE)\n # contents = proc.stdout.read()\n return {\n 'atime_dt': epoch_to_dt(fo.atime()),\n 'compressed_b': fo.compressed(),\n 'contents_t': string.translate(fo.contents(), filter),\n 'contents_display': string.translate(fo.contents(), filter),\n 'crtime_dt': epoch_to_dt(fo.crtime()),\n 'ctime_dt': epoch_to_dt(fo.ctime()),\n 'dtime_dt': epoch_to_dt(fo.dtime()),\n 'encrypted_b': fo.encrypted(),\n 'extension_facet': fo.ext(),\n 'fileid_i': int(fo._tags['id']),\n 'filename_display': fo.filename(),\n 'filename_t': fo.filename(),\n 'filesize_l': long(fo.filesize()),\n 'fragments_i': int(fo.fragments()),\n 'gid_i': int(fo._tags['gid']),\n #'id': uuid.uuid4(),\n 'id': hashlib.sha1(os.path.basename(IMAGE) + '_' + fo.inode()).hexdigest(),\n #'imagefile': fo._tags['imagefile'],\n 'inode_i': int(fo.inode()),\n 'libmagic_display': fo.libmagic(),\n 'libmagic_facet': fo.libmagic(),\n 'md5_s': fo.md5(),\n 'meta_type_i': fo._tags['meta_type'],\n 'mode_facet': int(fo._tags['mode']),\n 'mode_i': int(fo._tags['mode']),\n 'mtime_dt': epoch_to_dt(fo.mtime()),\n 'nlink_i': fo._tags['nlink'],\n 'name_type_s': fo.name_type(),\n 'partition_i': int(fo.partition()),\n 'sha1_s': fo.sha1(),\n 'uid_i': int(fo._tags['uid']),\n 'volume_display': IMAGE,\n 'volume_facet': os.path.basename(IMAGE)\n }\n else:\n return None",
"def __init__(self, attachment=None, *, parent=None, **kwargs):\n kwargs.setdefault('protocol', getattr(parent, 'protocol', None))\n kwargs.setdefault('main_resource',\n getattr(parent, 'main_resource', None))\n\n super().__init__(**kwargs)\n self.name = None\n self.attachment_type = 'file'\n self.attachment_id = None\n self.content_id = None\n self.is_inline = False\n self.attachment = None\n self.content = None\n self.on_disk = False\n self.on_cloud = kwargs.get('on_cloud', False)\n self.size = None\n\n if attachment:\n if isinstance(attachment, dict):\n if self._cloud_data_key in attachment:\n # data from the cloud\n attachment = attachment.get(self._cloud_data_key)\n self.attachment_id = attachment.get(self._cc('id'), None)\n self.content_id = attachment.get(self._cc('contentId'), None)\n self.is_inline = attachment.get(self._cc('IsInline'), False)\n self.name = attachment.get(self._cc('name'), None)\n self.content = attachment.get(self._cc('contentBytes'),\n None)\n self.attachment_type = 'item' if 'item' in attachment.get(\n '@odata.type', '').lower() else 'file'\n self.on_disk = False\n self.size = attachment.get(self._cc('size'), None)\n else:\n file_path = attachment.get('path', attachment.get('name'))\n if file_path is None:\n raise ValueError('Must provide a valid \"path\" or '\n '\"name\" for the attachment')\n self.content = attachment.get('content')\n self.on_disk = attachment.get('on_disk')\n self.attachment_id = attachment.get('attachment_id')\n self.attachment = Path(file_path) if self.on_disk else None\n self.name = (self.attachment.name if self.on_disk\n else attachment.get('name'))\n self.size = self.attachment.stat().st_size if self.attachment else None\n\n elif isinstance(attachment, str):\n self.attachment = Path(attachment)\n self.name = self.attachment.name\n elif isinstance(attachment, Path):\n self.attachment = attachment\n self.name = self.attachment.name\n elif isinstance(attachment, (tuple, list)):\n # files with custom names or Inmemory objects\n file_obj, custom_name = attachment\n if isinstance(file_obj, BytesIO):\n # in memory objects\n self.size = file_obj.getbuffer().nbytes\n self.content = base64.b64encode(file_obj.getvalue()).decode('utf-8')\n else:\n self.attachment = Path(file_obj)\n self.name = custom_name\n\n elif isinstance(attachment, AttachableMixin):\n # Object that can be attached (Message for example)\n self.attachment_type = 'item'\n self.attachment = attachment\n self.name = attachment.attachment_name\n self.content = attachment.to_api_data()\n self.content['@odata.type'] = attachment.attachment_type\n\n if self.content is None and self.attachment and self.attachment.exists():\n with self.attachment.open('rb') as file:\n self.content = base64.b64encode(file.read()).decode('utf-8')\n self.on_disk = True\n self.size = self.attachment.stat().st_size",
"def ParseDiskResourceFromAttachedDisk(resources, attached_disk):\n try:\n disk = resources.Parse(\n attached_disk.source, collection='compute.regionDisks')\n if disk:\n return disk\n except (cloud_resources.WrongResourceCollectionException,\n cloud_resources.RequiredFieldOmittedException):\n pass\n\n try:\n disk = resources.Parse(attached_disk.source, collection='compute.disks')\n if disk:\n return disk\n except (cloud_resources.WrongResourceCollectionException,\n cloud_resources.RequiredFieldOmittedException):\n pass\n\n raise cloud_resources.InvalidResourceException('Unable to parse [{}]'.format(\n attached_disk.source))",
"def getVmDisk(vmName, alias=None, disk_id=None):\n value = None\n if disk_id:\n prop = \"id\"\n value = disk_id\n elif alias:\n prop = \"name\"\n value = alias\n else:\n logger.error(\"No disk identifier or name was provided\")\n return None\n return get_disk_obj_from_disk_attachment(\n get_disk_attachment(vmName, value, prop)\n )",
"def _get_binary_filesystem(self, cr, uid, ids, name, arg, context=None):\n res = {}\n attachment_obj = self.pool.get('ir.attachment')\n\n for record in self.browse(cr, uid, ids, context=context):\n res[record.id] = False\n attachment_ids = attachment_obj.search(cr, uid, [('res_model','=',self._name),('res_id','=',record.id),('binary_field','=',name)], context=context)\n import logging\n #_logger = logging.getLogger(__name__)\n #_logger.info('res %s', attachment_ids)\n if attachment_ids:\n img = attachment_obj.browse(cr, uid, attachment_ids, context=context)[0].datas\n #_logger.info('res %s', img)\n res[record.id] = img\n return res",
"def get_attachment(cls, client_object):\n return client_object.ovsdb.Port.get_one(\n search='name=%s' % client_object.name).interfaces",
"def _retrieve_blob(self, object_key):\n return self.s3_resource.Object(self.CVE_BUCKET, object_key).get()['Body'].read()",
"def get_file_object(self):\n try:\n # FieldFile.open() and File.open() don't return file objects, so\n # accessing it directly\n return self.datafile.file.file # FileStoreItem.FieldFile.File.file\n except ValueError as exc:\n logger.error(\"Error opening %s: %s\", self.datafile, exc)\n return None",
"def _load_disk(self):",
"def _load_disk(self):",
"def disk(self):\n return self._context.get(\"disk\", None)",
"def detachDiskFromMinipad(self , disk):\n return",
"def _load_disk(self):\r\n pass",
"def disk(self) -> HwDisk:\n return self._disk",
"def _load_object(self, cid):\n object_data = unixfs_pb2.Data()\n object_data.ParseFromString(self.client.object.data(\n cid,\n **self.client_request_kwargs,\n ))\n\n self.cid_type_cache[cid] = object_data.Type\n self.path_size_cache[cid] = object_data.filesize\n self.block_cache[cid] = object_data.Data\n self.subblock_sizes_cache[cid] = object_data.blocksizes\n\n return object_data",
"def get_as_attachment(self):\n return self.as_attachment",
"def _blob(self):\n return self._load_blob",
"def get_imguuid(disk_object):\n return disk_object.get_id()",
"def getFile(self, resource):\n resource = self.parseUrl(resource, 'files')\n\n res = self.getRequest(resource)\n fObj = vsdModels.File(**res)\n return fObj",
"def get_object(self, account, container, object):\n \n node = {'zone': 3, 'weight': 100.0, 'ip': self.address, \n 'id': 3, 'meta': '', 'device': 'sda6', 'port': self.port}\n \n headers = dict()\n partition = self.__get_partition__(account, container, object, self.part_shift)\n \n path = \"/%s/%s/%s\" % (account, container, object)\n method = \"GET\"\n conn = http_connect(node['ip'], node['port'],#class\n node['device'], partition, method, path,\n #headers=headers,\n #query_string=''\n )\n \n resp = conn.getresponse()\n status = resp.status\n headers = resp.getheaders()\n content = resp.read()\n\n return (status, headers, content)#http's return value, headers contain more information and could be verified later, content is the file content.",
"def test_get_file_object(self):\n pass",
"def get_attachment(self, attachment_name):\n attachment = self.get_attachment_meta(attachment_name)\n with attachment.open() as content:\n return content.read()",
"def get_disk_list_from_disk_attachments(disk_attachments):\n return [\n get_disk_obj_from_disk_attachment(disk_attachment) for\n disk_attachment in disk_attachments\n ]",
"def attachment(self, attachment_id):\r\n return Attachment(self, attachment_id)"
] | [
"0.74053216",
"0.6793077",
"0.6505615",
"0.62658155",
"0.6190773",
"0.61779624",
"0.59081876",
"0.59013236",
"0.5899676",
"0.58538806",
"0.58369386",
"0.5775959",
"0.576744",
"0.57462335",
"0.56462747",
"0.56462747",
"0.5566617",
"0.55492383",
"0.5525881",
"0.55177563",
"0.5516127",
"0.550804",
"0.5493953",
"0.5464921",
"0.54325765",
"0.54171014",
"0.5408046",
"0.5401904",
"0.538938",
"0.53847307"
] | 0.8071707 | 0 |
Return disk obj list from disk attachments list | def get_disk_list_from_disk_attachments(disk_attachments):
return [
get_disk_obj_from_disk_attachment(disk_attachment) for
disk_attachment in disk_attachments
] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_disk_attachments(name, object_type='vm', get_href=False):\n api = get_api(object_type, \"%ss\" % object_type)\n obj = api.find(name)\n return DISK_ATTACHMENTS_API.getElemFromLink(obj, get_href=get_href)",
"def getObjDisks(name, get_href=True, is_template=False):\n response = get_disk_attachments(\n name, 'template' if is_template else 'vm', get_href\n )\n if get_href:\n return response\n return get_disk_list_from_disk_attachments(response)",
"def fs_get_disk_list(self):\n\t\treturn Job(SDK.PrlSrv_FsGetDiskList(self.handle)[0])",
"def get_disk_attachment(name, disk, attr='id', object_type='vm'):\n disk_list = get_disk_attachments(name, object_type=object_type)\n disk_id = None\n if attr == 'name' or attr == 'alias':\n for disk_obj in disk_list:\n disk_obj_alias = get_disk_obj(\n disk_obj.get_id(), attribute='id'\n ).get_alias()\n if disk_obj_alias == disk:\n disk_id = disk_obj.get_id()\n break\n elif attr == 'id':\n disk_id = disk\n\n for disk in disk_list:\n if disk.get_id() == disk_id:\n return disk\n return None",
"def attachments(self):\n return [Attachment(part) for part in self._parts]",
"def get_ceph_disk():\n disks = []\n for srv in get_srv_list():\n cfg = get_srv_config(srv)\n for key in ['osd_data', 'osd_journal', 'mds_data', 'mon_data']:\n mnt_point = cfg[key]\n disk = get_disk_by_mountpoint(find_mount_point(mnt_point))\n if disk not in disks:\n disks.append(disk)\n return disks",
"def get_disks():\n disks = []\n\n try:\n # Run script\n result = run_diskpart(['list disk'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append disk numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Disk (\\d+)\\s+\\w+\\s+(\\d+\\s+\\w+)', output):\n num = tmp[0]\n size = human_readable_size(tmp[1])\n disks.append({'Number': num, 'Size': size})\n\n return disks",
"def get_overage_disks_json(disk_list):\n\t\tpass",
"def get_disk_obj_from_disk_attachment(disk_attachment):\n return get_disk_obj(disk_attachment.get_id(), 'id')",
"def get_volumes(self, oid):\n path = '/servers/%s/os-volume_attachments' % oid\n res = self.client.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('List volumes for server %s: %s' % \n (oid, truncate(res)))\n return res[0]['volumeAttachments']",
"def items(self) -> typing.List[\"VolumeAttachment\"]:\n return typing.cast(\n typing.List[\"VolumeAttachment\"],\n self._properties.get(\"items\"),\n )",
"def listDeviceAttachments(*args, attribute: AnyStr=\"\", axis: AnyStr=\"\", clutch: AnyStr=\"\",\n device: AnyStr=\"\", file: AnyStr=\"\", selection: bool=True, write:\n bool=True, **kwargs)->AnyStr:\n pass",
"def items(\n self, value: typing.Union[typing.List[\"VolumeAttachment\"], typing.List[dict]]\n ):\n cleaned: typing.List[VolumeAttachment] = []\n for item in value:\n if isinstance(item, dict):\n item = typing.cast(\n VolumeAttachment,\n VolumeAttachment().from_dict(item),\n )\n cleaned.append(typing.cast(VolumeAttachment, item))\n self._properties[\"items\"] = cleaned",
"def content_list(self):\n return self.face.FACES.files.find({})",
"def get_ordered_partitions(disks):\n parts = []\n for disk in disks:\n parts += disk.partitions\n parts.sort(lambda x,y: len(x.mntpnt or '')-len(y.mntpnt or ''))\n return parts",
"def parse_attachments(request):\n attachments = []\n for attachment in request.files.getlist('attachment'):\n attachments.append(Attachment(attachment.filename, attachment))\n return attachments",
"def disk_ids(self):\n return list(self._disks)",
"def get_queryset(self, **kwargs):\n print(\"inside attachmentlistview for object %s\" % self.gfk_object)\n attachments = self.gfk_object.attachments.all()\n self.checker.prefetch_perms(attachments)\n return attachments",
"def l10n_mx_edi_retrieve_attachments(self):\n self.ensure_one()\n if not self.l10n_mx_edi_cfdi_name:\n return []\n domain = [\n ('res_id', '=', self.id),\n ('res_model', '=', self._name),\n ('name', '=', self.l10n_mx_edi_cfdi_name )]\n return self.env['ir.attachment'].search(domain)",
"def _get_disk_extension(self, disk_list):\r\n\r\n _extn_list = []\r\n for each_disk in disk_list:\r\n _disk_name, _extn_name = os.path.splitext(each_disk)\r\n _extn_list.append(_extn_name)\r\n\r\n _extn_list = list(set(_extn_list))\r\n\r\n if len(_extn_list) > 1:\r\n return _extn_list\r\n else:\r\n return _extn_list[0]",
"def get_all_files(schema_obj):\n\n files = []\n for vendor in schema_obj.vendor_list:\n for file_obj in vendor.file_list:\n files.append(file_obj)\n return files",
"def get_all_disk():\n\t\tdisks = []\n\t\tdisks_lines = linux.exe_shell(\"lsblk -o NAME,VENDOR|grep -P '^sd.*[A-Z]'\")\n\t\tfor line in disks_lines.splitlines():\n\t\t\tdisk_t = line.split()\n\t\t\tif len(disk_t) > 1 and \"LSI\" not in disk_t[1]:\n\t\t\t\tdisks.append(disk_t[0])\n\t\tds = []\n\t\tfor i in disks:\n\t\t\td_t = DiskFromLsiSas3(\"\", i)\n\t\t\td_t.fill_attrs()\n\t\t\tds.append(d_t)\n\t\treturn ds",
"def files_storage_list(self, prefix='pipeline/', print_paths=False):\n\n return self.backend.files_storage_list(prefix=prefix, print_paths=print_paths)",
"def attachments(self):\r\n return Attachments(self)",
"def getPDFList(self):\n metadata = self._getMetadata()\n if not 'pdf_files' in metadata:\n metadata['pdf_files'] = PersistentDict()\n\n return metadata['pdf_files']",
"def attachments(self, val: list):\n self._attachments = []\n if val is not None:\n for item in val:\n if isinstance(item, Attachment):\n self._attachments.append(item)",
"def _get_binary_filesystem(self, cr, uid, ids, name, arg, context=None):\n res = {}\n attachment_obj = self.pool.get('ir.attachment')\n\n for record in self.browse(cr, uid, ids, context=context):\n res[record.id] = False\n attachment_ids = attachment_obj.search(cr, uid, [('res_model','=',self._name),('res_id','=',record.id),('binary_field','=',name)], context=context)\n import logging\n #_logger = logging.getLogger(__name__)\n #_logger.info('res %s', attachment_ids)\n if attachment_ids:\n img = attachment_obj.browse(cr, uid, attachment_ids, context=context)[0].datas\n #_logger.info('res %s', img)\n res[record.id] = img\n return res",
"def find_video_attachments(document_attachments):\n if isinstance(document_attachments, dict):\n document_attachments = [document_attachments]\n video_info_list = []\n for collection in document_attachments:\n if \"video\" in collection['contentType']:\n size = round(collection['size']/1048576, 2)\n video_info_list.append({\"download_url\": collection['url'], \"size\": size})\n return video_info_list",
"def GetFilesToBackup(domainXml):\n disks = root.findall(\"./devices/disk/source\")\n\n files = []\n for disk in disks:\n files.append(disk.get(\"file\"))\n\n return files",
"def _ListUsbDisks(self):\n disk_list = []\n for disk in glob.glob('/sys/block/sd*'):\n with open(disk + '/removable', 'r') as fd:\n if int(fd.readline()) == 1:\n device = '/dev/%s' % disk.split('/')[-1]\n manuf = self._GetDiskInfo(disk, 'manufacturer')\n product = self._GetDiskInfo(disk, 'product')\n capacity = self._GetDiskCapacity(device)\n if capacity:\n desc = '%s: %s %s %d GB' % (device, manuf, product, capacity)\n disk_list.append([device, manuf, product, capacity, desc])\n return disk_list"
] | [
"0.6690214",
"0.6335851",
"0.6259155",
"0.59162986",
"0.58995736",
"0.58648187",
"0.58527523",
"0.582788",
"0.58202004",
"0.5820131",
"0.5785615",
"0.57774895",
"0.5740484",
"0.5735309",
"0.5713783",
"0.56764907",
"0.563066",
"0.56279755",
"0.5622251",
"0.5558199",
"0.5538795",
"0.55291694",
"0.5507941",
"0.5448308",
"0.5445209",
"0.5423101",
"0.5417188",
"0.5412827",
"0.5398381",
"0.53766924"
] | 0.77885747 | 0 |
Get disk attachments objects or hrefs from a vm or template | def get_disk_attachments(name, object_type='vm', get_href=False):
api = get_api(object_type, "%ss" % object_type)
obj = api.find(name)
return DISK_ATTACHMENTS_API.getElemFromLink(obj, get_href=get_href) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getObjDisks(name, get_href=True, is_template=False):\n response = get_disk_attachments(\n name, 'template' if is_template else 'vm', get_href\n )\n if get_href:\n return response\n return get_disk_list_from_disk_attachments(response)",
"def get_disk_attachment(name, disk, attr='id', object_type='vm'):\n disk_list = get_disk_attachments(name, object_type=object_type)\n disk_id = None\n if attr == 'name' or attr == 'alias':\n for disk_obj in disk_list:\n disk_obj_alias = get_disk_obj(\n disk_obj.get_id(), attribute='id'\n ).get_alias()\n if disk_obj_alias == disk:\n disk_id = disk_obj.get_id()\n break\n elif attr == 'id':\n disk_id = disk\n\n for disk in disk_list:\n if disk.get_id() == disk_id:\n return disk\n return None",
"def fusion_api_get_storage_volume_attachments(self, uri=None, param='', api=None, headers=None):\n return self.volume_attachment.get(uri=uri, param=param, api=api, headers=headers)",
"def get_attachments_for(parser, token):\n def next_bit_for(bits, key, if_none=None):\n try:\n return bits[bits.index(key)+1]\n except ValueError:\n return if_none\n\n bits = token.contents.split()\n args = {\n 'obj': next_bit_for(bits, 'get_attachments_for'),\n 'var_name': next_bit_for(bits, 'as', '\"attachments\"'),\n }\n return AttachmentsForObjectNode(**args)",
"def get_volumes(self, oid):\n path = '/servers/%s/os-volume_attachments' % oid\n res = self.client.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('List volumes for server %s: %s' % \n (oid, truncate(res)))\n return res[0]['volumeAttachments']",
"def get_disk_list_from_disk_attachments(disk_attachments):\n return [\n get_disk_obj_from_disk_attachment(disk_attachment) for\n disk_attachment in disk_attachments\n ]",
"def l10n_mx_edi_retrieve_attachments(self):\n self.ensure_one()\n if not self.l10n_mx_edi_cfdi_name:\n return []\n domain = [\n ('res_id', '=', self.id),\n ('res_model', '=', self._name),\n ('name', '=', self.l10n_mx_edi_cfdi_name )]\n return self.env['ir.attachment'].search(domain)",
"def attachments(self):\n if \"attachments\" in self._prop_dict:\n return AttachmentsCollectionPage(self._prop_dict[\"attachments\"])\n else:\n return None",
"def get_queryset(self, **kwargs):\n print(\"inside attachmentlistview for object %s\" % self.gfk_object)\n attachments = self.gfk_object.attachments.all()\n self.checker.prefetch_perms(attachments)\n return attachments",
"def _get_binary_filesystem(self, cr, uid, ids, name, arg, context=None):\n res = {}\n attachment_obj = self.pool.get('ir.attachment')\n\n for record in self.browse(cr, uid, ids, context=context):\n res[record.id] = False\n attachment_ids = attachment_obj.search(cr, uid, [('res_model','=',self._name),('res_id','=',record.id),('binary_field','=',name)], context=context)\n import logging\n #_logger = logging.getLogger(__name__)\n #_logger.info('res %s', attachment_ids)\n if attachment_ids:\n img = attachment_obj.browse(cr, uid, attachment_ids, context=context)[0].datas\n #_logger.info('res %s', img)\n res[record.id] = img\n return res",
"def find_video_attachments(document_attachments):\n if isinstance(document_attachments, dict):\n document_attachments = [document_attachments]\n video_info_list = []\n for collection in document_attachments:\n if \"video\" in collection['contentType']:\n size = round(collection['size']/1048576, 2)\n video_info_list.append({\"download_url\": collection['url'], \"size\": size})\n return video_info_list",
"def getTemplateDisk(template_name, alias):\n template_disks = getObjDisks(\n template_name, get_href=False, is_template=True\n )\n for template_disk in template_disks:\n if alias == template_disk.get_alias():\n return template_disk\n raise EntityNotFound(\n \"Didn't find disk %s for template %s\" % (alias, template_name)\n )",
"def find_by_task(self, task, params={}, **options):\n path = \"/tasks/%s/attachments\" % (task)\n return self.client.get_collection(path, params, **options)",
"def find(self, md5=None):\n\n params = {}\n if md5:\n params['hash'] = md5\n else:\n params['name'] = self.name\n\n try:\n res = self.tq.get('/api/attachments', params=params)\n if res and res.get('data') and res['data']:\n self.fill_from_api_response(res['data'][0])\n except Exception:\n pass",
"def getAttachDir(request, pagename, create=0):\n if request.page and pagename == request.page.page_name:\n page = request.page # reusing existing page obj is faster\n else:\n page = Page(request, pagename)\n return page.getPagePath(\"attachments\", check_create=create)",
"def bootable_volume(volumes):\n for volume in volumes:\n if '/dev/vda' in volume['attachments']:\n return volume",
"def attachments(self):\n return self.properties.get('attachments',\n AttachmentCollection(self.context, ResourcePath(\"attachments\", self.resource_path)))",
"def downloadAttachments(self, page,localPath):\n return self.pm_getSpaceManager().downloadAttachments(self._unbox(page), localPath)",
"def vm_files_browse(self, vm_path='\\\\', show_deleted_files=False):\r\n return self.browse(vm_path, show_deleted_files, True)",
"def get_disk_obj_from_disk_attachment(disk_attachment):\n return get_disk_obj(disk_attachment.get_id(), 'id')",
"def get_attachment(cls, client_object):\n return client_object.ovsdb.Port.get_one(\n search='name=%s' % client_object.name).interfaces",
"def parse_attachments(request):\n attachments = []\n for attachment in request.files.getlist('attachment'):\n attachments.append(Attachment(attachment.filename, attachment))\n return attachments",
"def listAttachments(self, page):\n return tuple(AttachmentProxy.AttachmentProxy(attachmentDict) for attachmentDict in self.pm_getSpaceManager().listAttachments(self._unbox(page)))",
"def get_storage_domain_diskssnapshots_objects(storagedomain, get_href=False):\n from art.rhevm_api.tests_lib.low_level.storagedomains import (\n get_storage_domain_obj\n )\n storage_domain_object = get_storage_domain_obj(storagedomain)\n return DISK_SNAPSHOT_API.getElemFromLink(\n storage_domain_object,\n link_name='disksnapshots',\n attr='disk_snapshot',\n get_href=get_href,\n )",
"def content_list(self):\n return self.face.FACES.files.find({})",
"def attachments(self):\r\n return Attachments(self)",
"def getVmDisk(vmName, alias=None, disk_id=None):\n value = None\n if disk_id:\n prop = \"id\"\n value = disk_id\n elif alias:\n prop = \"name\"\n value = alias\n else:\n logger.error(\"No disk identifier or name was provided\")\n return None\n return get_disk_obj_from_disk_attachment(\n get_disk_attachment(vmName, value, prop)\n )",
"def getPostAttachment(self,id,filename):\n # GET /posts/$id/attachments/$filename\n pass",
"def items(self) -> typing.List[\"VolumeAttachment\"]:\n return typing.cast(\n typing.List[\"VolumeAttachment\"],\n self._properties.get(\"items\"),\n )",
"def attach(self,\n names,\n vm):\n results = []\n for name in names:\n volume_info = self.cm.find_name(name)\n if volume_info and volume_info[0]['State'] != \"deleted\":\n vms = volume_info[0]['AttachedToVm']\n path = volume_info[0]['path']\n if vm in vms:\n Console.error(f\"{name} already attached to {vm}\")\n else:\n result = self.mount(path=f\"{path}/{name}\", vm=vm)\n mounts = result['mounts']\n if f\"{path}/{name}\" in mounts.keys():\n vms.append(vm)\n\n result = self.update_volume_after_attached_to_vm(\n info=volume_info, vms=vms)\n results.append(result)\n else:\n Console.error(\n \"volume is not existed or volume had been deleted\")\n return results[0]"
] | [
"0.69518954",
"0.6364521",
"0.62688756",
"0.6195739",
"0.6097033",
"0.59367794",
"0.57009",
"0.5675643",
"0.56053907",
"0.55326456",
"0.55198544",
"0.55018055",
"0.5492309",
"0.54542166",
"0.5448821",
"0.54335314",
"0.53960615",
"0.537463",
"0.5373918",
"0.53636795",
"0.5357369",
"0.53479165",
"0.5289036",
"0.5283386",
"0.5281671",
"0.52453935",
"0.5231723",
"0.52226293",
"0.52131855",
"0.5210737"
] | 0.78496575 | 0 |
Returns a disk attachment object | def get_disk_attachment(name, disk, attr='id', object_type='vm'):
disk_list = get_disk_attachments(name, object_type=object_type)
disk_id = None
if attr == 'name' or attr == 'alias':
for disk_obj in disk_list:
disk_obj_alias = get_disk_obj(
disk_obj.get_id(), attribute='id'
).get_alias()
if disk_obj_alias == disk:
disk_id = disk_obj.get_id()
break
elif attr == 'id':
disk_id = disk
for disk in disk_list:
if disk.get_id() == disk_id:
return disk
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_disk_obj_from_disk_attachment(disk_attachment):\n return get_disk_obj(disk_attachment.get_id(), 'id')",
"def prepare_disk_attachment_object(disk_id=None, **kwargs):\n disk = kwargs.pop(\"disk\", None)\n disk_obj = disk if disk else prepare_ds_object(\"Disk\", id=disk_id)\n return prepare_ds_object(\"DiskAttachment\", disk=disk_obj, **kwargs)",
"def get_disk_attachments(name, object_type='vm', get_href=False):\n api = get_api(object_type, \"%ss\" % object_type)\n obj = api.find(name)\n return DISK_ATTACHMENTS_API.getElemFromLink(obj, get_href=get_href)",
"def attachment(self, attachment_id):\r\n return Attachment(self, attachment_id)",
"def get_disk_obj(disk_alias, attribute='name'):\n return DISKS_API.find(disk_alias, attribute=attribute)",
"def get_as_attachment(self):\n return self.as_attachment",
"def disk(self):\n return self.__disk",
"def _getDisk(self):\n try:\n disk = self.parents[0]\n except IndexError:\n disk = None\n return disk",
"def get_attachment(self, attachment_name):\n attachment = self.get_attachment_meta(attachment_name)\n with attachment.open() as content:\n return content.read()",
"def getAttachment(self):\n log_func.warning(u'The method of obtaining the object attached to the anchor control is not defined')\n return None",
"def getAttachment(self, page, name, version = None):\n return AttachmentProxy.AttachmentProxy(self.pm_getSpaceManager().getAttachment(self._unbox(page), name, version))",
"def __init__(self, attachment=None, *, parent=None, **kwargs):\n kwargs.setdefault('protocol', getattr(parent, 'protocol', None))\n kwargs.setdefault('main_resource',\n getattr(parent, 'main_resource', None))\n\n super().__init__(**kwargs)\n self.name = None\n self.attachment_type = 'file'\n self.attachment_id = None\n self.content_id = None\n self.is_inline = False\n self.attachment = None\n self.content = None\n self.on_disk = False\n self.on_cloud = kwargs.get('on_cloud', False)\n self.size = None\n\n if attachment:\n if isinstance(attachment, dict):\n if self._cloud_data_key in attachment:\n # data from the cloud\n attachment = attachment.get(self._cloud_data_key)\n self.attachment_id = attachment.get(self._cc('id'), None)\n self.content_id = attachment.get(self._cc('contentId'), None)\n self.is_inline = attachment.get(self._cc('IsInline'), False)\n self.name = attachment.get(self._cc('name'), None)\n self.content = attachment.get(self._cc('contentBytes'),\n None)\n self.attachment_type = 'item' if 'item' in attachment.get(\n '@odata.type', '').lower() else 'file'\n self.on_disk = False\n self.size = attachment.get(self._cc('size'), None)\n else:\n file_path = attachment.get('path', attachment.get('name'))\n if file_path is None:\n raise ValueError('Must provide a valid \"path\" or '\n '\"name\" for the attachment')\n self.content = attachment.get('content')\n self.on_disk = attachment.get('on_disk')\n self.attachment_id = attachment.get('attachment_id')\n self.attachment = Path(file_path) if self.on_disk else None\n self.name = (self.attachment.name if self.on_disk\n else attachment.get('name'))\n self.size = self.attachment.stat().st_size if self.attachment else None\n\n elif isinstance(attachment, str):\n self.attachment = Path(attachment)\n self.name = self.attachment.name\n elif isinstance(attachment, Path):\n self.attachment = attachment\n self.name = self.attachment.name\n elif isinstance(attachment, (tuple, list)):\n # files with custom names or Inmemory objects\n file_obj, custom_name = attachment\n if isinstance(file_obj, BytesIO):\n # in memory objects\n self.size = file_obj.getbuffer().nbytes\n self.content = base64.b64encode(file_obj.getvalue()).decode('utf-8')\n else:\n self.attachment = Path(file_obj)\n self.name = custom_name\n\n elif isinstance(attachment, AttachableMixin):\n # Object that can be attached (Message for example)\n self.attachment_type = 'item'\n self.attachment = attachment\n self.name = attachment.attachment_name\n self.content = attachment.to_api_data()\n self.content['@odata.type'] = attachment.attachment_type\n\n if self.content is None and self.attachment and self.attachment.exists():\n with self.attachment.open('rb') as file:\n self.content = base64.b64encode(file.read()).decode('utf-8')\n self.on_disk = True\n self.size = self.attachment.stat().st_size",
"def build_attachment1():\n \n attachment = Attachment()\n attachment.file_content = (\"TG9yZW0gaXBzdW0gZG9sb3Igc2l0IGFtZXQsIGNvbnNl\"\n \"Y3RldHVyIGFkaXBpc2NpbmcgZWxpdC4gQ3JhcyBwdW12\")\n attachment.file_type = \"application/pdf\"\n attachment.file_name = \"balance_001.pdf\"\n attachment.disposition = \"attachment\"\n attachment.content_id = \"Balance Sheet\"\n return attachment",
"def disk(self) -> HwDisk:\n return self._disk",
"def Attachment(self, table, sys_id=None):\n return Attachment(self, table, sys_id)",
"def get_attachment(cls, client_object):\n return client_object.ovsdb.Port.get_one(\n search='name=%s' % client_object.name).interfaces",
"def get_file(self, gmail_client=None, **kwargs) -> File:\n if gmail_client is None:\n raise TypeError(\"Required keyword argument: gmail_client must not be None.\")\n\n attachment = gmail_client._raw_gmail_attachment(self.message_id, self.attachment_id)\n file_data = base64.urlsafe_b64decode(attachment[\"data\"].encode(\"UTF-8\"))\n return File(self.attachment_name, BytesIO(file_data))",
"def disk(self):\n return self._context.get(\"disk\", None)",
"def new_attachment(self, context, payload):\n\n message_id = payload['id']\n parts = payload['payload']['parts']\n\n for part in parts:\n if part['mimeType'] == \"application/octet-stream\" and part['filename']:\n att_id = part['body']['attachmentId']\n\n data = {\n \"message_id\": message_id,\n \"attachment_id\": att_id\n }\n\n return GmailApi.attachment(context, data)",
"def getAttachment(mail, directory=detach_dir):#Download attachment to directory & return filename\n filename = []\n for part in mail.walk():\n if part.get_content_maintype() == 'multipart':\n continue\n if part.get('Content-Disposition') is None:\n continue\n\n filename = part.get_filename()\n att_path = os.path.join(directory, filename)\n\n if not os.path.isfile(att_path) :\n fp = open(att_path, 'wb')\n fp.write(part.get_payload(decode=True))\n fp.close()\n\n return filename",
"def disk_fxt(request):\n disk = request.param\n disk.download()\n return disk",
"def get_attachment(self,\n attachment_name: Text) -> Optional[test_record.Attachment]:\n # Check current running phase state for the attachment name first.\n if self.running_phase_state:\n if attachment_name in self.running_phase_state.phase_record.attachments:\n attachment = self.running_phase_state.phase_record.attachments.get(\n attachment_name)\n return copy.deepcopy(attachment)\n\n for phase_record in self.test_record.phases:\n if attachment_name in phase_record.attachments:\n attachment = phase_record.attachments[attachment_name]\n return copy.deepcopy(attachment)\n\n self.state_logger.warning('Could not find attachment: %s', attachment_name)\n return None",
"def _create_attachment(self, filename, content, mimetype=None):\n if mimetype is None:\n mimetype, _ = mimetypes.guess_type(filename)\n if mimetype is None:\n mimetype = DEFAULT_ATTACHMENT_MIME_TYPE\n basetype, subtype = mimetype.split('/', 1)\n if basetype == 'text':\n encoding = self.encoding or settings.DEFAULT_CHARSET\n attachment = SafeMIMEText(smart_str(content,\n settings.DEFAULT_CHARSET), subtype, encoding)\n else:\n # Encode non-text attachments with base64.\n attachment = MIMEBase(basetype, subtype)\n attachment.set_payload(content)\n encoders.encode_base64(attachment)\n if filename:\n try:\n filename = filename.encode('ascii')\n except UnicodeEncodeError:\n filename = Header(filename, 'utf-8').encode()\n attachment.add_header('Content-Disposition', 'attachment',\n filename=filename)\n return attachment",
"def ParseDiskResourceFromAttachedDisk(resources, attached_disk):\n try:\n disk = resources.Parse(\n attached_disk.source, collection='compute.regionDisks')\n if disk:\n return disk\n except (cloud_resources.WrongResourceCollectionException,\n cloud_resources.RequiredFieldOmittedException):\n pass\n\n try:\n disk = resources.Parse(attached_disk.source, collection='compute.disks')\n if disk:\n return disk\n except (cloud_resources.WrongResourceCollectionException,\n cloud_resources.RequiredFieldOmittedException):\n pass\n\n raise cloud_resources.InvalidResourceException('Unable to parse [{}]'.format(\n attached_disk.source))",
"def get_by_filename(self, filename):\n return Attachment(self.context, ServiceOperationPath(\"GetByFileName\", [filename], self.resource_path))",
"def factory(attachment):\n return Attachment(\n from_url=attachment['from_url'] if 'from_url' in attachment else None,\n image_url=attachment['image_url'] if 'image_url' in attachment else None,\n original_url=attachment['original_url'] if 'original_url' in attachment else None,\n text=attachment['text'] if 'text' in attachment else None)",
"def getPostAttachment(self,id,filename):\n # GET /posts/$id/attachments/$filename\n pass",
"def getVmDisk(vmName, alias=None, disk_id=None):\n value = None\n if disk_id:\n prop = \"id\"\n value = disk_id\n elif alias:\n prop = \"name\"\n value = alias\n else:\n logger.error(\"No disk identifier or name was provided\")\n return None\n return get_disk_obj_from_disk_attachment(\n get_disk_attachment(vmName, value, prop)\n )",
"def file(self, file_id):\r\n return files.File(self, file_id)",
"def downloadAttachment(self, page, name, localPath, version = None):\n return self.pm_getSpaceManager().downloadAttachment(self._unbox(page), name, localPath, version)"
] | [
"0.779783",
"0.72560024",
"0.6928796",
"0.6578346",
"0.6403104",
"0.63818413",
"0.6339139",
"0.63055414",
"0.6187242",
"0.6176985",
"0.61555827",
"0.61421245",
"0.6115087",
"0.6013763",
"0.6011736",
"0.6002136",
"0.591897",
"0.5893593",
"0.58661467",
"0.58532095",
"0.5836235",
"0.583016",
"0.58225965",
"0.5822064",
"0.5813917",
"0.5811843",
"0.58025926",
"0.5772849",
"0.5771206",
"0.5750334"
] | 0.75038487 | 1 |
Get all disks in the system except the OVF store disks | def get_non_ovf_disks():
return [
d.get_id() for d in get_all_disks() if (
d.get_alias() != ENUMS['ovf_disk_alias']
)
] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_all_disks():\n return DISKS_API.get(abs_link=False)",
"def get_all_disk():\n\t\tdisks = []\n\t\tdisks_lines = linux.exe_shell(\"lsblk -o NAME,VENDOR|grep -P '^sd.*[A-Z]'\")\n\t\tfor line in disks_lines.splitlines():\n\t\t\tdisk_t = line.split()\n\t\t\tif len(disk_t) > 1 and \"LSI\" not in disk_t[1]:\n\t\t\t\tdisks.append(disk_t[0])\n\t\tds = []\n\t\tfor i in disks:\n\t\t\td_t = DiskFromLsiSas3(\"\", i)\n\t\t\td_t.fill_attrs()\n\t\t\tds.append(d_t)\n\t\treturn ds",
"def get_ceph_disk():\n disks = []\n for srv in get_srv_list():\n cfg = get_srv_config(srv)\n for key in ['osd_data', 'osd_journal', 'mds_data', 'mon_data']:\n mnt_point = cfg[key]\n disk = get_disk_by_mountpoint(find_mount_point(mnt_point))\n if disk not in disks:\n disks.append(disk)\n return disks",
"def getzKVMdisks():\n result = []\n\n devices = getAllHardDisks()\n\n # get disk that has 7 partitions\n for dev in devices:\n parts = getPartitions(dev)\n\n if len(parts) == 7:\n result.append(dev)\n\n return result",
"def get_disks():\n\n if system() != \"Windows\":\n raise OSError(\"For use with Windows platforms.\")\n\n logicaldisks=run(\n [\"wmic\", \"logicaldisk\", \"get\", \"name\"],\n capture_output=True\n )\n\n return findall(\"[A-Z]:\", str(logicaldisks.stdout))",
"def list_disks(self, instance_name):\n return ['A_DISK']",
"def getGuestDisk(self, oSession, oTxsSession, eStorageController):\n lstDisks = None;\n\n # The naming scheme for NVMe is different and we don't have\n # to query the guest for unformatted disks here because the disk with the OS\n # is not attached to a NVMe controller.\n if eStorageController == vboxcon.StorageControllerType_NVMe:\n lstDisks = [ '/dev/nvme0n1' ];\n else:\n # Find a unformatted disk (no partition).\n # @todo: This is a hack because LIST and STAT are not yet implemented\n # in TXS (get to this eventually)\n lstBlkDev = [ '/dev/sda', '/dev/sdb' ];\n for sBlkDev in lstBlkDev:\n fRc = oTxsSession.syncExec('/usr/bin/ls', ('ls', sBlkDev + '1'));\n if not fRc:\n lstDisks = [ sBlkDev ];\n break;\n\n _ = oSession;\n return lstDisks;",
"async def get_disks(self, oid):\n pool = await self.query([('id', '=', oid)], {'get': True})\n if not pool['is_decrypted']:\n yield\n async for i in await self.middleware.call('zfs.pool.get_disks', pool['name']):\n yield i",
"def get_disks():\n disks = []\n\n try:\n # Run script\n result = run_diskpart(['list disk'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append disk numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Disk (\\d+)\\s+\\w+\\s+(\\d+\\s+\\w+)', output):\n num = tmp[0]\n size = human_readable_size(tmp[1])\n disks.append({'Number': num, 'Size': size})\n\n return disks",
"def list_vdisks(client, resource_group_name, vm_name):\n virtual_machine = client.get(resource_group_name, vm_name)\n return virtual_machine.disks",
"def ListVdisks(self, headers=None, query_params=None, content_type=\"application/json\"):\n uri = self.client.base_url + \"/vdisks\"\n return self.client.get(uri, None, headers, query_params, content_type)",
"def _ListUsbDisks(self):\n disk_list = []\n for disk in glob.glob('/sys/block/sd*'):\n with open(disk + '/removable', 'r') as fd:\n if int(fd.readline()) == 1:\n device = '/dev/%s' % disk.split('/')[-1]\n manuf = self._GetDiskInfo(disk, 'manufacturer')\n product = self._GetDiskInfo(disk, 'product')\n capacity = self._GetDiskCapacity(device)\n if capacity:\n desc = '%s: %s %s %d GB' % (device, manuf, product, capacity)\n disk_list.append([device, manuf, product, capacity, desc])\n return disk_list",
"async def get_disks(self, include_temperature: bool = False) -> List[CachingDisk]:\n return await self._disk_fetcher.get_disks(\n include_temperature=include_temperature,\n )",
"def getDiskDrives(self, df: str = None, ts: str = None, cursor: str = None, pageSize: int = None):\n params = {\n 'df': df,\n 'ts': ts,\n 'cursor': cursor,\n 'pageSize': pageSize\n }\n return self.api_get_request(f'{self.NINJA_API_QUERIES_DISKS}', params=params)",
"def ListDisks(self) -> Dict[str, 'AZComputeDisk']:\n disks = self.az_account.compute.ListDisks(\n resource_group_name=self.resource_group_name)\n vm_disks = self.compute_client.virtual_machines.get(\n self.resource_group_name, self.name).storage_profile\n vm_disks_names = [disk.name for disk in vm_disks.data_disks]\n vm_disks_names.append(vm_disks.os_disk.name)\n return {disk_name: disks[disk_name] for disk_name in vm_disks_names}",
"def getObjDisks(name, get_href=True, is_template=False):\n response = get_disk_attachments(\n name, 'template' if is_template else 'vm', get_href\n )\n if get_href:\n return response\n return get_disk_list_from_disk_attachments(response)",
"def get_disks_name(hw_lst, without_bootable=False):\n disks = []\n for entry in hw_lst:\n if entry[0] == 'disk' and entry[2] == 'size':\n if without_bootable and is_booted_storage_device(entry[1]):\n sys.stderr.write(\"Skipping disk %s in destructive mode, \"\n \"this is the booted device !\\n\" % entry[1])\n elif 'I:' in entry[1]:\n pass\n else:\n disks.append(entry[1])\n return disks",
"def fs_get_disk_list(self):\n\t\treturn Job(SDK.PrlSrv_FsGetDiskList(self.handle)[0])",
"def disks(self) -> List[CachingDisk]:\n return self._disk_fetcher.disks",
"def disk_ids(self):\n return list(self._disks)",
"def scan_disks():\n disks = get_disks()\n\n # Get disk details\n for disk in disks:\n # Get partition style\n disk['Table'] = get_table_type(disk)\n\n # Get disk name/model and physical details\n disk.update(get_disk_details(disk))\n\n # Get partition info for disk\n disk['Partitions'] = get_partitions(disk)\n\n for partition in disk['Partitions']:\n # Get partition details\n partition.update(get_partition_details(disk, partition))\n\n # Done\n return disks",
"def vm_diskfilter(self):\r\n vm_diskfilter = []\r\n if self._vmDiskFilter is not None:\r\n subclient_diskfilter = self._vmDiskFilter\r\n\r\n if 'filters' in subclient_diskfilter:\r\n filters = subclient_diskfilter['filters']\r\n\r\n for child in filters:\r\n filter_type_id = str(child['filterType'])\r\n filter_type = self.filter_types[str(child['filterType'])]\r\n vm_id = child['vmGuid'] if 'vmGuid' in child else None\r\n filter_name = child['filter']\r\n\r\n temp_dict = {\r\n 'filter': filter_name,\r\n 'filterType': filter_type,\r\n 'vmGuid': vm_id,\r\n 'filterTypeId': filter_type_id\r\n }\r\n\r\n vm_diskfilter.append(temp_dict)\r\n else:\r\n vm_diskfilter = self._vmDiskFilter\r\n\r\n if len(vm_diskfilter) == 0:\r\n vm_diskfilter = None\r\n return vm_diskfilter",
"def list_rem_drives() -> List[Drive]:\n proc = subprocess.run(\n args=[\n 'powershell',\n '-noprofile',\n '-command',\n 'Get-WmiObject -Class Win32_LogicalDisk | Select-Object deviceid,volumename,drivetype | ConvertTo-Json'\n ],\n text=True,\n stdout=subprocess.PIPE\n )\n if proc.returncode != 0 or not proc.stdout.strip():\n print('Failed to enumerate drives')\n return []\n devices = json.loads(proc.stdout)\n\n drive_types = {\n 0: 'Unknown',\n 1: 'No Root Directory',\n 2: 'Removable Disk',\n 3: 'Local Disk',\n 4: 'Network Drive',\n 5: 'Compact Disc',\n 6: 'RAM Disk',\n }\n\n drives = [Drive(\n letter=d['deviceid'],\n label=d['volumename'],\n drive_type=drive_types[d['drivetype']]\n ) for d in devices]\n res=[]\n for drive in drives:\n if drive.is_removable:\n res.append(drive)\n return res",
"def disk_partitions(all=False):\n phydevs = []\n f = open(\"/proc/filesystems\", \"r\")\n for line in f:\n if not line.startswith(\"nodev\"):\n phydevs.append(line.strip())\n\n retlist = []\n f = open('/etc/mtab', \"r\")\n for line in f:\n if not all and line.startswith('none'):\n continue\n fields = line.split()\n device = fields[0]\n mountpoint = fields[1]\n fstype = fields[2]\n if not all and fstype not in phydevs:\n continue\n if device == 'none':\n device = ''\n ntuple = disk_ntuple(device, mountpoint, fstype)\n retlist.append(ntuple)\n return retlist",
"def show_disks(self):\n icon = Icons.Icons() # Icon\n\n # For every disk, listing information\n icon.show_icon(\"disk\")\n for disk in DISKS:\n self.__get_info(disk)",
"def get_disks(self):\n # root node\n root = ElementTree.fromstring(self.libvirt_domain.XMLDesc())\n\n # search <disk type='file' device='disk'> entries\n disks = root.findall(\"./devices/disk[@device='disk']\")\n\n # for every disk get drivers, sources and targets\n drivers = [disk.find(\"driver\").attrib for disk in disks]\n sources = [disk.find(\"source\").attrib for disk in disks]\n targets = [disk.find(\"target\").attrib for disk in disks]\n\n # iterate drivers, sources and targets\n if len(drivers) != len(sources) != len(targets):\n raise RuntimeError(\"Drivers, sources and targets lengths are different %s:%s:%s\" % (\n len(drivers), len(sources), len(targets)))\n\n disk_info = namedtuple('DiskInfo', ['device', 'file', 'format'])\n\n # all disks info\n disks_info = []\n\n for i in range(len(sources)):\n disks_info.append(disk_info(targets[i][\"dev\"], sources[i][\"file\"], drivers[i][\"type\"]))\n\n return disks_info",
"def get_disks(self):\n result = {}\n\n exp = self.config['devices']\n reg = re.compile(exp)\n fs_types = set(self.config['fs_types'].split(','))\n\n try:\n fp = open('/proc/mounts')\n for line in fp:\n columns = line.split()\n device = columns[0].strip('/').replace('dev/','',1)\n mount_point = columns[1]\n fs_type = columns[2]\n\n if not reg.match(device):\n continue\n\n if fs_type not in fs_types:\n continue\n\n result[device] = mount_point\n except Exception as e:\n self.log.debug('Could not read /proc/mounts!')\n self.log.exception(e)\n finally:\n fp.close()\n return result",
"def _filterPhysicalDisks(self,data):\n\t\tvd = False\n\t\tif re.search(\"virtual.*disk\",\"\".join(data),re.I):\n\t\t\tvd = True\n\n\t\tindex = -1\n\t\tfor item in data:\n\t\t\tif re.search(\"Unconfigured.Physical.Disks\",item):\n\t\t\t\tindex = data.index(item)\n\t\t\t\tbreak\n\t\telse:\n\t\t\tno_config = self.serial.buffer.get_region(5,5,8,32)\n\t\t\tif re.search(\"no configuration present\",\"\".join(no_config),re.I):\n\t\t\t\tpdisk_info = self.serial.buffer.get_region(7,55,8,75)\t\n\t\t\t\tpdCount = re.search(\"PD Count.*:(.*\\d+).*\",\"\".join(pdisk_info))\n\t\t\t\tif pdCount:\n\t\t\t\t\tpdCount = pdCount.group(1).strip()\n\t\t\t\t\treturn range(int(pdCount))\t\t\n\t\t\tif vd:\n\t\t\t\treturn \"\",False\n\t\t\tprint \"No free Physical disks are available -> FAILED\"\n\t\t\tself.serial.conn.close()\n\t\t\tself.serial.thread.kill()\n\t\t\tself.serial.thread.kill()\n\t\t\tsys.exit(1)\n\n\t\tdisks = []\n\t\tfor i in range(index,len(data)):\n\t\t\treObj = re.search(\"\\d{1,2}:\\d{1,2}:\\d{1,2}\",data[i])\n\t\t\tif reObj:\n\t\t\t\tdisks.append(reObj.group(0))\n\n\t\treturn disks",
"def get_devices():\n devices, errors = [], []\n\n for path in hookenv.action_get('devices').split(' '):\n path = path.strip()\n if not os.path.isabs(path):\n errors.append('{}: Not absolute path.'.format(path))\n elif not os.path.exists(path):\n errors.append('{}: Device does not exist.'.format(path))\n else:\n devices.append(path)\n\n if errors:\n raise ZapDiskError(\", \".join(errors))\n\n return devices",
"def get_ordered_filesystems(vm):\n fss = list(vm.filesystems)\n for disk in vm.disks:\n fss += [part.fs for part in disk.partitions]\n fss.sort(lambda x,y: len(x.mntpnt or '')-len(y.mntpnt or ''))\n return fss"
] | [
"0.73597276",
"0.7200828",
"0.69137365",
"0.6862478",
"0.6828824",
"0.6764747",
"0.6552665",
"0.65480185",
"0.65238166",
"0.6423896",
"0.63987154",
"0.6374826",
"0.63124114",
"0.6271041",
"0.62678945",
"0.6261637",
"0.62305725",
"0.61843395",
"0.6136389",
"0.6070867",
"0.6056205",
"0.6026711",
"0.5999982",
"0.5988752",
"0.5980926",
"0.59633446",
"0.5883421",
"0.58701265",
"0.58655465",
"0.583011"
] | 0.7961894 | 0 |
Get the qcow_version info from disk name or id | def get_qcow_version_disk(disk_name, attribute='name'):
return get_disk_obj(disk_name, attribute).get_qcow_version() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _fetch_disk_info(resource_group_name, disk_name):\n show_disk_command = 'az disk show -g {g} -n {name} --query [sku.name,location,osType,hyperVGeneration] -o json'.format(g=resource_group_name, name=disk_name)\n disk_info = loads(_call_az_command(show_disk_command))\n # Note that disk_info will always have 4 elements if the command succeeded, if it fails it will cause an exception\n sku, location, os_type, hyper_v_version = disk_info[0], disk_info[1], disk_info[2], disk_info[3]\n return (sku, location, os_type, hyper_v_version)",
"def get_voluuid(disk_object):\n return disk_object.get_image_id()",
"def GetVdiskInfo(self, vdiskid, headers=None, query_params=None, content_type=\"application/json\"):\n uri = self.client.base_url + \"/vdisks/\"+vdiskid\n return self.client.get(uri, None, headers, query_params, content_type)",
"def get_info(volpath):\n dhandle = vol_open_path(volpath, VMDK_OPEN_DISKCHAIN_NOIO)\n\n if not disk_is_valid(dhandle):\n logging.warning(\"Failed to open disk - %s\", volpath)\n return None\n\n sinfo = disk_info()\n res = lib.DiskLib_GetSize(dhandle, 0, VMDK_MAX_SNAPS, byref(sinfo))\n\n lib.DiskLib_Close(dhandle)\n if res != 0:\n logging.warning(\"Failed to get size of disk %s - %x\", volpath, res)\n return None\n\n return {VOL_SIZE: convert(sinfo.size), VOL_ALLOC: convert(sinfo.allocated)}",
"def get_tag(node) -> str:\n version, err = node.exec_command(cmd=\"ceph -v\")\n return \"v\" + re.search(r\"[0-9]+(\\.[0-9]+)+\", version).group(0)",
"def _get_version():\n try:\n code, output = _run_cmd('git', 'describe', '--tags')\n if code:\n return 'unknown'\n output = output.decode('utf8').strip().split('-')\n if len(output) != 3:\n return 'unknown'\n version = '%s+%s' % (output[0], output[2])\n\n code, _ = _run_cmd('git', 'diff', '--quiet')\n if code:\n version += '+dirty'\n\n return version\n except OSError:\n return 'unknown'",
"def get_os_version(instance):\n if instance.cloud == 'aws':\n client = boto3.client('ec2', instance.region)\n image_id = client.describe_instances(InstanceIds=[instance.id])['Reservations'][0]['Instances'][0]['ImageId']\n return '16.04' if '16.04' in client.describe_images(ImageIds=[image_id])['Images'][0]['Name'] else '14.04'\n if instance.cloud == 'gcp':\n credentials = GoogleCredentials.get_application_default()\n compute = discovery.build('compute', 'v1', credentials=credentials)\n for disk in compute.instances().get(instance=instance.name,\n zone=instance.zone,\n project=instance.project).execute()['disks']:\n if not disk.get('boot'):\n continue\n for value in disk.get('licenses', []):\n if '1604' in value:\n return '16.04'\n if '1404' in value:\n return '14.04'\n return '14.04'\n return '14.04'",
"def get_volume_info(host, disk_object, dc_obj):\n host_resource = get_host_resource_by_name(host)\n\n vol_id = disk_object.get_image_id()\n sd_id = disk_object.get_storage_domains().get_storage_domain()[0].get_id()\n image_id = disk_object.get_id()\n sp_id = dc_obj.get_id()\n\n args = {\n \"storagepoolID\": sp_id,\n \"storagedomainID\": sd_id,\n \"imageID\": image_id,\n \"volumeID\": vol_id,\n }\n\n return host_resource.vds_client(cmd=\"Volume.getInfo\", args=args)",
"def GenericGetDiskInfo(self, uuid=None, name=None):\n if uuid:\n disk = self.cfg.GetDiskInfo(uuid)\n if disk is None:\n raise errors.OpPrereqError(\"No disk was found with this UUID: %s\" %\n uuid, errors.ECODE_INVAL)\n elif name:\n disk = self.cfg.GetDiskInfoByName(name)\n if disk is None:\n raise errors.OpPrereqError(\"No disk was found with this name: %s\" %\n name, errors.ECODE_INVAL)\n else:\n raise errors.ProgrammerError(\"No disk UUID or name was given\")\n\n return disk",
"def get_info():\n\n #Determine if running on Linux or Mac.\n if platform.system() == 'Linux':\n linux = True\n\n elif platform.system() == \"Darwin\":\n linux = False\n\n if linux:\n from . import linux\n linux.get_info()\n diskinfo = linux.DISKINFO\n\n else:\n from . import macos\n macos.get_info()\n diskinfo = macos.DISKINFO\n\n return diskinfo",
"def name(self):\n return '%s.qcow2' % self._name",
"def get_volume_info(self, uid):\n LOG.debug(\"Entering\")\n cmd = \"svcinfo lsvdisk -bytes -filtervalue vdisk_UID=%s -delim :\" % uid\n output = self._svc_command(cmd)[0]\n\n if len(output) != 2:\n raise SVCVolumeNotFound(\n _(\"Couldn't find volume information for UID %s\") % uid)\n\n header = output[0].split(':')\n values = output[1].split(':')\n index = header.index(SVC_KEY_VDISK_ID)\n diskId = values[index]\n index = header.index(SVC_KEY_VDISK_NAME)\n name = values[index]\n index = header.index(SVC_KEY_VOLUME_GROUP)\n volumeGroup = values[index]\n index = header.index(SVC_KEY_VDISK_CAPACITY)\n capacity = values[index]\n\n info = {SVC_KEY_VDISK_ID: diskId,\n SVC_KEY_VDISK_NAME: name,\n SVC_KEY_VOLUME_GROUP: volumeGroup,\n SVC_KEY_VDISK_CAPACITY: capacity}\n\n LOG.debug(\"Exiting\")\n return info",
"def get_version_info(self):\n\n try:\n nt_header = self.get_nt_header()\n except ValueError, ve:\n return obj.NoneObject(\"PE file failed initial sanity checks: {0}\".format(ve))\n\n try:\n unsafe = self.obj_vm.get_config().UNSAFE\n except AttributeError:\n unsafe = False\n\n for sect in nt_header.get_sections(unsafe):\n if str(sect.Name) == '.rsrc':\n root = obj.Object(\"_IMAGE_RESOURCE_DIRECTORY\", self.obj_offset + sect.VirtualAddress, self.obj_vm)\n for rname, rentry, rdata in root.get_entries():\n # We're a VERSION resource and we have subelements\n if rname == resource_types['RT_VERSION'] and rentry:\n for sname, sentry, sdata in rdata.get_entries():\n # We're the single sub element of the VERSION\n if sname == 1 and sentry:\n # Get the string tables\n for _stname, stentry, stdata in sdata.get_entries():\n if not stentry:\n return obj.Object(\"_VS_VERSION_INFO\", offset = (stdata.DataOffset + self.obj_offset), vm = self.obj_vm)\n\n return obj.NoneObject(\"Cannot find a _VS_VERSION_INFO structure\")",
"def qemu_img_info(path):\n if not os.path.exists(path):\n return QemuImgInfo()\n\n out, err = utils.execute('env', 'LC_ALL=C', 'LANG=C',\n 'qemu-img', 'info', path)\n return QemuImgInfo(out)",
"def show_vdisk(client, resource_group_name, vm_name, disk_name):\n virtual_machine = client.get(resource_group_name, vm_name)\n for disk in virtual_machine.disks:\n if disk.virtual_disk_name == disk_name:\n return disk\n return None",
"def GetVersion(image):\n parts = image.name.rsplit('v', 1)\n if len(parts) != 2:\n log.debug('Skipping image with malformed name [%s].', image.name)\n return None\n return parts[1]",
"def get_id_version(crx_path):\n crx_id, ver_str = path.basename(crx_path).split('.crx')[0].split('_', 1)\n ver_str = ver_str.replace('_', '.')\n return crx_id, ver_str",
"def get_firmware_version():\r\n return utils.run('crossystem fwid').stdout.strip()",
"def get_ceph_drv_info():\n disks_info = []\n stat = psutil.disk_io_counters(perdisk=True)\n for drv in get_ceph_disk():\n info = CEPHDiskInfo(drv)\n disk = basename(drv)\n if disk in stat:\n info.rd_cnt = stat[disk].read_count\n info.wr_cnt = stat[disk].write_count\n info.rd_bytes = stat[disk].read_bytes\n info.wr_bytes = stat[disk].write_bytes\n info.rd_time = stat[disk].read_time\n info.wr_time = stat[disk].write_time\n\n disks_info.append(info)\n\n return disks_info",
"def disk_format(self, disk_id):\n try:\n return self.disk(disk_id).find(\"FORMAT\").text\n except AttributeError:\n return None",
"def get_version(ip):\n url='http://{}/ins'.format(ip)\n\n myheaders={'content-type':'application/json'}\n payload={\n \"ins_api\": {\n \"version\": \"1.0\",\n \"type\": \"cli_show\",\n \"chunk\": \"0\",\n \"sid\": \"1\",\n \"input\": \"show version\",\n \"output_format\": \"json\"\n }\n }\n response = requests.post(url,data=json.dumps(payload), headers=myheaders,auth=(nxos_username,nxos_password))\n resp = response.json()['ins_api']['outputs']['output']['body']['kickstart_ver_str']\n return resp",
"def getvg(host, disk):\r\n sshCommand = \"lspv | grep '^%s ' | awk '{print $3}'\" % disk\r\n vgName = sub.Popen([\"ssh\", \"-q\", host, sshCommand],\r\n shell=False, stdout=sub.PIPE, stderr=sub.PIPE\r\n ).communicate()[0].strip()\r\n return vgName",
"def version_bytes(self) -> str:\n return pulumi.get(self, \"version_bytes\")",
"def test_disk(self):\n self.command.package = self.input_ovf\n self.command.file_id = \"file1\"\n self.command.run()\n self.command.finished()\n self.check_diff(\"\"\"\n <ovf:References>\n- <ovf:File ovf:href=\"input.vmdk\" ovf:id=\"file1\" ovf:size=\"{vmdk_size}\" />\n <ovf:File ovf:href=\"input.iso\" ovf:id=\"file2\" ovf:size=\"{iso_size}\" />\n...\n <ovf:Info>Virtual disk information</ovf:Info>\n- <ovf:Disk ovf:capacity=\"1\" ovf:capacityAllocationUnits=\"byte * 2^30\" \\\novf:diskId=\"vmdisk1\" ovf:fileRef=\"file1\" ovf:format=\"http://www.vmware.com/\\\ninterfaces/specifications/vmdk.html#streamOptimized\" />\n </ovf:DiskSection>\n...\n <rasd:AddressOnParent>0</rasd:AddressOnParent>\n- <rasd:ElementName>Hard Drive</rasd:ElementName>\n- <rasd:HostResource>ovf:/disk/vmdisk1</rasd:HostResource>\n- <rasd:InstanceID>6</rasd:InstanceID>\n- <rasd:Parent>3</rasd:Parent>\n- <rasd:ResourceType>17</rasd:ResourceType>\n- </ovf:Item>\n- <ovf:Item>\n- <rasd:AddressOnParent>0</rasd:AddressOnParent>\n <rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>\n\"\"\".format(vmdk_size=self.FILE_SIZE['input.vmdk'],\n iso_size=self.FILE_SIZE['input.iso']))\n self.assertFalse(os.path.exists(os.path.join(self.temp_dir,\n \"input.vmdk\")),\n \"deleted file should not be exported\")",
"def __get_info(self, disk):\n # Checking the disk exists\n\tFNULL = open(os.devnull, 'w') # Defining /dev/null\n\tcheck = subprocess.call(['df', disk], stdout=FNULL,\\\n stderr=subprocess.STDOUT)\n # Disk not found\n if check != 0:\n sys.stdout.write(\"^fg(%s)!E! DiskNotFound^fg()\" % Colors.CL_BASE08)\n # Disk found\n else:\n # Executing command, parsing output and removing empty elements\n cmd = subprocess.Popen(['df', '-H', disk], stdout=subprocess.PIPE)\n cmd_out, cmd_err = cmd.communicate()\n cmd_outparsed = cmd_out.split(' ')\n cmd_outparsed = filter(None, cmd_outparsed)\n\n # Getting information\n disk_name = disk\n disk_size = cmd_outparsed[7]\n disk_used = cmd_outparsed[8]\n disk_available = cmd_outparsed[9]\n disk_percentage = cmd_outparsed[10].translate(None, \"%\")\n disk_percentage = int(disk_percentage)\n\n # Disk Name: ~\n sys.stdout.write(\"^fg(%s)[^fg()\" % Colors.CL_BASE02)\n if disk_name == DISK_DATA:\n sys.stdout.write(\"^fg(%s)~^fg()\" % Colors.CL_BASE0D)\n sys.stdout.write(\"^fg(%s): ^fg()\" % Colors.CL_BASE03)\n # Disk Name: /\n elif disk_name == DISK_ROOT:\n sys.stdout.write(\"^fg(%s)/^fg()\" % Colors.CL_BASE0D)\n sys.stdout.write(\"^fg(%s): ^fg()\" % Colors.CL_BASE03)\n \n # Disk Percentage: Good\n if 0 <= disk_percentage <= 60:\n sys.stdout.write(\"^fg(%s)%s%%^fg()\" % (Colors.CL_BASE0B, disk_percentage))\n sys.stdout.write(\"^fg(%s) %s^fg()\" % (Colors.CL_BASE0B, disk_used))\n sys.stdout.write(\"^fg(%s)/^fg()\" % Colors.CL_BASE03)\n # Disk Percentage: Fair\n elif 61 <= disk_percentage <= 90:\n sys.stdout.write(\"^fg(%s)%s%%^fg()\" % (Colors.CL_BASE09, disk_percentage))\n sys.stdout.write(\"^fg(%s) %s^fg()\" % (Colors.CL_BASE09, disk_used))\n sys.stdout.write(\"^fg(%s)/^fg()\" % Colors.CL_BASE03)\n # Disk Percentage: Weak\n elif 91 <= disk_percentage <= 100:\n sys.stdout.write(\"^fg(%s)%s%%^fg()\" % (Colors.CL_BASE08, disk_percentage))\n sys.stdout.write(\"^fg(%s) %s^fg()\" % (Colors.CL_BASE08, disk_used))\n sys.stdout.write(\"^fg(%s)/^fg()\" % Colors.CL_BASE03)\n sys.stdout.write(\"^fg(%s)%s^fg()\" % (Colors.CL_BASE0D, disk_size))\n sys.stdout.write(\"^fg(%s)]^fg()\" % Colors.CL_BASE02)",
"def get_partition_details(disk, partition):\n details = {}\n script = [\n 'select disk {}'.format(disk['Number']),\n 'select partition {}'.format(partition['Number']),\n 'detail partition']\n\n # Diskpart details\n try:\n # Run script\n result = run_diskpart(script)\n except subprocess.CalledProcessError:\n pass\n else:\n # Get volume letter or RAW status\n output = result.stdout.decode().strip()\n tmp = re.search(r'Volume\\s+\\d+\\s+(\\w|RAW)\\s+', output)\n if tmp:\n if tmp.group(1).upper() == 'RAW':\n details['FileSystem'] = RAW\n else:\n details['Letter'] = tmp.group(1)\n # Remove empty lines from output\n tmp = [s.strip() for s in output.splitlines() if s.strip() != '']\n # Split each line on ':' skipping those without ':'\n tmp = [s.split(':') for s in tmp if ':' in s]\n # Add key/value pairs to the details variable and return dict\n details.update({key.strip(): value.strip() for (key, value) in tmp})\n\n # Get MBR type / GPT GUID for extra details on \"Unknown\" partitions\n guid = PARTITION_UIDS.get(details.get('Type').upper(), {})\n if guid:\n details.update({\n 'Description': guid.get('Description', '')[:29],\n 'OS': guid.get('OS', 'Unknown')[:27]})\n\n if 'Letter' in details:\n # Disk usage\n try:\n tmp = psutil.disk_usage('{}:\\\\'.format(details['Letter']))\n except OSError as err:\n details['FileSystem'] = 'Unknown'\n details['Error'] = err.strerror\n else:\n details['Used Space'] = human_readable_size(tmp.used)\n\n # fsutil details\n cmd = [\n 'fsutil',\n 'fsinfo',\n 'volumeinfo',\n '{}:'.format(details['Letter'])\n ]\n try:\n result = run_program(cmd)\n except subprocess.CalledProcessError:\n pass\n else:\n output = result.stdout.decode().strip()\n # Remove empty lines from output\n tmp = [s.strip() for s in output.splitlines() if s.strip() != '']\n # Add \"Feature\" lines\n details['File System Features'] = [s.strip() for s in tmp\n if ':' not in s]\n # Split each line on ':' skipping those without ':'\n tmp = [s.split(':') for s in tmp if ':' in s]\n # Add key/value pairs to the details variable and return dict\n details.update({key.strip(): value.strip() for (key, value) in tmp})\n\n # Set Volume Name\n details['Name'] = details.get('Volume Name', '')\n\n # Set FileSystem Type\n if details.get('FileSystem', '') not in ['RAW', 'Unknown']:\n details['FileSystem'] = details.get('File System Name', 'Unknown')\n\n return details",
"def get_uuid(disk):\n\n #TODO\n return \"Unknown\"",
"def get_version_info() -> Tuple[Text, Text]:",
"def dump_version(input_bytes):\n return dump_from_release(input_bytes, \"version\")",
"def collect_k8s_version_info(ns_output_dir, k8s_cli):\n cmd = f\"{k8s_cli} version -o yaml\"\n collect_helper(ns_output_dir, cmd, \"Version.yaml\", \"Version\")"
] | [
"0.59442717",
"0.5840063",
"0.57724935",
"0.5763192",
"0.56448805",
"0.5523646",
"0.54851925",
"0.54775894",
"0.54669577",
"0.54666495",
"0.5381811",
"0.53117114",
"0.52990836",
"0.52868456",
"0.5284956",
"0.52782893",
"0.5276385",
"0.5264113",
"0.52572054",
"0.52468705",
"0.5231414",
"0.5213782",
"0.52008516",
"0.5191038",
"0.5190052",
"0.5165986",
"0.516392",
"0.5160638",
"0.5159148",
"0.51231146"
] | 0.7662928 | 0 |
Return the disks contained in a snapshot | def get_snapshot_disks_by_snapshot_obj(snapshot):
return DISKS_API.getElemFromLink(snapshot) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_disks():\n\n if system() != \"Windows\":\n raise OSError(\"For use with Windows platforms.\")\n\n logicaldisks=run(\n [\"wmic\", \"logicaldisk\", \"get\", \"name\"],\n capture_output=True\n )\n\n return findall(\"[A-Z]:\", str(logicaldisks.stdout))",
"def get_disks():\n disks = []\n\n try:\n # Run script\n result = run_diskpart(['list disk'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append disk numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Disk (\\d+)\\s+\\w+\\s+(\\d+\\s+\\w+)', output):\n num = tmp[0]\n size = human_readable_size(tmp[1])\n disks.append({'Number': num, 'Size': size})\n\n return disks",
"def get_storage_domain_diskssnapshots_objects(storagedomain, get_href=False):\n from art.rhevm_api.tests_lib.low_level.storagedomains import (\n get_storage_domain_obj\n )\n storage_domain_object = get_storage_domain_obj(storagedomain)\n return DISK_SNAPSHOT_API.getElemFromLink(\n storage_domain_object,\n link_name='disksnapshots',\n attr='disk_snapshot',\n get_href=get_href,\n )",
"def snapshots(self, owner=None, restorable_by=None):\r\n rs = self.connection.get_all_snapshots(owner=owner,\r\n restorable_by=restorable_by)\r\n mine = []\r\n for snap in rs:\r\n if snap.volume_id == self.id:\r\n mine.append(snap)\r\n return mine",
"def get_snapshot(project, zone, instance):\n snapshot_disks(project, zone, *get_disks(instance))",
"def get_all_disks():\n return DISKS_API.get(abs_link=False)",
"def list_disks(self, instance_name):\n return ['A_DISK']",
"def vm_snapshotlist(args):\n snapshot = args.snapshot\n name = args.name\n config = Kconfig(client=args.client, debug=args.debug, region=args.region, zone=args.zone, namespace=args.namespace)\n k = config.k\n common.pprint(\"Listing snapshots of %s...\" % name)\n snapshots = k.snapshot(snapshot, name, listing=True)\n if isinstance(snapshots, dict):\n common.pprint(\"Vm %s not found\" % name, color='red')\n return\n else:\n for snapshot in snapshots:\n print(snapshot)\n return",
"def get_snapshot_children(self, snapshot):\n LOG.debug('get_snapshot_children starts.')\n pool_name = self.configuration.rbd_pool\n volume_name = \\\n 'volume-%s' % encodeutils.safe_encode(snapshot[\"volume_id\"])\n snap_name = 'snapshot-%s' % encodeutils.safe_encode(snapshot['id'])\n children = list()\n children_on_snap = \\\n self._get_snapshot_children(pool_name, volume_name, snap_name)\n if children_on_snap is not None:\n for child in children_on_snap:\n item = dict()\n if len(child) == 2:\n item[\"pool_name\"] = child[0]\n item[\"volume_name\"] = child[1]\n if child[1].startswith(\"volume-\"):\n item[\"type\"] = \"volume\"\n item[\"uuid\"] = child[1][len(\"volume-\"):]\n elif uuidutils.is_uuid_like(child[1]):\n item[\"type\"] = \"volume\"\n item[\"uuid\"] = child[1]\n else:\n item[\"type\"] = \"\"\n item[\"uuid\"] = \"\"\n children.append(item)\n\n LOG.debug('snapshot children: %s', children)\n LOG.debug('get_snapshot_children finished.')\n return children",
"def derived_snapshots(self):\n start_time = time.time()\n log.debug(\"Getting snaps derived from volume {0}.\".format(self.volume_id))\n derived_snapshots = []\n for snap in self.app.cloud_interface.get_all_snapshots():\n try:\n if snap.volume_id == self.volume_id:\n derived_snapshots.append(snap)\n except EC2ResponseError, e:\n log.warning(\"EC2ResponseError getting snapshot status: {0} \"\n \"(code {1}; status {2})\"\n .format(e.message, e.error_code, e.status))\n log.debug(\"Got snaps derived from volume {0} in {1} seconds: {2}\"\n .format(self.volume_id, time.time() - start_time, derived_snapshots))\n return derived_snapshots",
"def scan_disks():\n disks = get_disks()\n\n # Get disk details\n for disk in disks:\n # Get partition style\n disk['Table'] = get_table_type(disk)\n\n # Get disk name/model and physical details\n disk.update(get_disk_details(disk))\n\n # Get partition info for disk\n disk['Partitions'] = get_partitions(disk)\n\n for partition in disk['Partitions']:\n # Get partition details\n partition.update(get_partition_details(disk, partition))\n\n # Done\n return disks",
"def get_ceph_disk():\n disks = []\n for srv in get_srv_list():\n cfg = get_srv_config(srv)\n for key in ['osd_data', 'osd_journal', 'mds_data', 'mon_data']:\n mnt_point = cfg[key]\n disk = get_disk_by_mountpoint(find_mount_point(mnt_point))\n if disk not in disks:\n disks.append(disk)\n return disks",
"def get_snapshots(self):\r\n ec2 = self.get_ec2_connection()\r\n rs = ec2.get_all_snapshots()\r\n all_vols = [self.volume_id] + self.past_volume_ids\r\n snaps = []\r\n for snapshot in rs:\r\n if snapshot.volume_id in all_vols:\r\n if snapshot.progress == '100%':\r\n snapshot.date = dateutil.parser.parse(snapshot.start_time)\r\n snapshot.keep = True\r\n snaps.append(snapshot)\r\n snaps.sort(cmp=lambda x,y: cmp(x.date, y.date))\r\n return snaps",
"def list_snapshots(self, detailed=True):\n aname = \"cinder_v%s.list_snapshots\" % self.version\n with atomic.ActionTimer(self, aname):\n return (self._get_client()\n .volume_snapshots.list(detailed))",
"def getObjDisks(name, get_href=True, is_template=False):\n response = get_disk_attachments(\n name, 'template' if is_template else 'vm', get_href\n )\n if get_href:\n return response\n return get_disk_list_from_disk_attachments(response)",
"def get_snapshots(FIELDS='all'):\n snapinfostr = fork_and_get_output(\"zfs list -t snapshot -H -o {0}\".format(FIELDS).split())\n header = get_zfs_snap_header()\n snapinfo = snapinfostr.splitlines()\n snapobjs = []\n for snapstr in snapinfo:\n snapobjs.append(DataZFS(snapstr, header, 'snapshot'))\n return snapobjs",
"def list_images(self):\n \n logging.debug(\"list_images entered for %s\" % self.machine_name) \n snapshots = cs.list_snapshots()\n res = []\n server_id = self.cloudserver.id\n # find the one for this server\n for snapshot in snapshots:\n img = snapshot.metadata.get(\"instance_uuid\", None)\n # print img\n\n if img == server_id:\n print \"Server %s has snapshot %s\" % (server_id, img)\n res.append(img)\n\n return res",
"def getSnapshotsOf(image):\n snapshotIds = []\n deviceMapping = image.block_device_mapping # dict of devices\n devices = deviceMapping.keys()\n for d in devices:\n snapshotId = deviceMapping[d].snapshot_id\n if snapshotId is not None:\n snapshotIds.append(snapshotId.encode())\n return snapshotIds",
"def get_volume_snapshots(self, volume):\n LOG.debug('get_volume_snapshot starts')\n pool_name = self.configuration.rbd_pool\n volume_name = 'volume-%s' % encodeutils.safe_encode(volume[\"id\"])\n snaps_on_vol = self._get_volume_snapshots(pool_name, volume_name)\n snapshots = list()\n if snaps_on_vol is not None:\n for snap in snaps_on_vol:\n snap_name = str(snap[\"name\"])\n item = dict()\n if snap_name.startswith(\"snapshot-\"):\n # snapshot directly created on volume.\n item[\"type\"] = \"volume_snap\"\n item[\"uuid\"] = snap_name[len('snapshot-'):]\n elif snap_name.startswith(\"volume-\") and \\\n snap_name.endswith(\".clone_snap\"):\n # snapshot used for create volume on volume.\n item[\"type\"] = \"clone_snap\"\n item[\"uuid\"] = snap_name[len(\"volume-\"):-len(\".clone_snap\")]\n elif snap_name.startswith(\"backup.\") and \".snap.\" in snap_name:\n # snapshot used for backup volume.\n item[\"type\"] = \"backup_snap\"\n item[\"uuid\"] = \\\n snap_name[len(\"backup.\"):snap_name.index(\".snap.\")]\n else:\n item[\"type\"] = \"\"\n item[\"uuid\"] = \"\"\n snapshots.append(item)\n\n LOG.debug('volume snapshots: %s', snapshots)\n LOG.debug('get_volume_snapshots finished.')\n return snapshots",
"def getContainerSnapshots(self,node,vmid):\n data = self.connect('get','nodes/%s/lxc/%s/snapshot' % (node,vmid),None)\n return data",
"def ListVdisks(self, headers=None, query_params=None, content_type=\"application/json\"):\n uri = self.client.base_url + \"/vdisks\"\n return self.client.get(uri, None, headers, query_params, content_type)",
"def get_snap_list(mnode):\n\n ret, out, _ = g.run(mnode, \"gluster snapshot list --xml\")\n if ret != 0:\n g.log.error(\"Failed to execute 'snapshot list' on node %s. \"\n \"Hence failed to get the snapshot list.\", mnode)\n return None\n\n try:\n root = etree.XML(out)\n except etree.ParseError:\n g.log.error(\"Failed to parse the gluster snapshot \"\n \"list xml output.\")\n return None\n\n snap_list = []\n for snap in root.findall(\"snapList/snapshot\"):\n snap_list.append(snap.text)\n\n return snap_list",
"def get_all_disk():\n\t\tdisks = []\n\t\tdisks_lines = linux.exe_shell(\"lsblk -o NAME,VENDOR|grep -P '^sd.*[A-Z]'\")\n\t\tfor line in disks_lines.splitlines():\n\t\t\tdisk_t = line.split()\n\t\t\tif len(disk_t) > 1 and \"LSI\" not in disk_t[1]:\n\t\t\t\tdisks.append(disk_t[0])\n\t\tds = []\n\t\tfor i in disks:\n\t\t\td_t = DiskFromLsiSas3(\"\", i)\n\t\t\td_t.fill_attrs()\n\t\t\tds.append(d_t)\n\t\treturn ds",
"def ListDisks(self) -> Dict[str, 'AZComputeDisk']:\n disks = self.az_account.compute.ListDisks(\n resource_group_name=self.resource_group_name)\n vm_disks = self.compute_client.virtual_machines.get(\n self.resource_group_name, self.name).storage_profile\n vm_disks_names = [disk.name for disk in vm_disks.data_disks]\n vm_disks_names.append(vm_disks.os_disk.name)\n return {disk_name: disks[disk_name] for disk_name in vm_disks_names}",
"def getzKVMdisks():\n result = []\n\n devices = getAllHardDisks()\n\n # get disk that has 7 partitions\n for dev in devices:\n parts = getPartitions(dev)\n\n if len(parts) == 7:\n result.append(dev)\n\n return result",
"def getSnapshotsD(region):\n # Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it)\n snapshots = getSnapshots(region)\n snapshotsDicts = []\n ims = getImages(region)\n for s in snapshots:\n amis = getAmisOf(s, ims)\n amiIds = []\n amiKeeps = []\n\n if len(amis) == 1:\n amiIds = amis[0].id.encode()\n amiKeeps = getKeepTag(amis[0])\n\n elif len(amis) == 0:\n amiIds = \"-------no-AMI-found\"\n amiKeeps = \"-------no-AMI-found\"\n else:\n for a in amis:\n amiIds.append(a.id.encode())\n amiKeeps.append(getKeepTag(a))\n\n snapshotsDict = {\"id\": s.id,\n \"status\": s.status,\n \"region\": s.region.name,\n \"progress\": s.progress,\n \"start_time\": s.start_time,\n \"volume_id\": s.volume_id,\n \"volume_size\": s.volume_size,\n \"KEEP-tag\": getKeepTag(s),\n \"Name\": get_name_tag(s),\n \"AMI(s)\": amiIds,\n \"AMI_KEEP-tags\": amiKeeps,\n \"PROD\": isProduction(s),\n \"Description\": s.description\n }\n snapshotsDicts.append(snapshotsDict)\n return snapshotsDicts",
"def get_snapshots(dataset=''):\n # filter my tags\n return os.listdir(dataset + ZFS_DEFAULT_SNAPSHOT_DIR)",
"def database_volume_snapshot_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.VolumeSnapshot)\n\n volume_snapshot_objs = list()\n for volume_snapshot in query.all():\n nfvi_volume_snapshot_data = \\\n json.loads(volume_snapshot.nfvi_volume_snapshot_data)\n nfvi_volume_snapshot = nfvi.objects.v1.VolumeSnapshot(\n nfvi_volume_snapshot_data['uuid'],\n nfvi_volume_snapshot_data['name'],\n nfvi_volume_snapshot_data['description'],\n nfvi_volume_snapshot_data['size_gb'],\n nfvi_volume_snapshot_data['volume_uuid'])\n volume_snapshot_obj = objects.VolumeSnapshot(nfvi_volume_snapshot)\n volume_snapshot_objs.append(volume_snapshot_obj)\n return volume_snapshot_objs",
"def list_vdisks(client, resource_group_name, vm_name):\n virtual_machine = client.get(resource_group_name, vm_name)\n return virtual_machine.disks",
"def getGuestDisk(self, oSession, oTxsSession, eStorageController):\n lstDisks = None;\n\n # The naming scheme for NVMe is different and we don't have\n # to query the guest for unformatted disks here because the disk with the OS\n # is not attached to a NVMe controller.\n if eStorageController == vboxcon.StorageControllerType_NVMe:\n lstDisks = [ '/dev/nvme0n1' ];\n else:\n # Find a unformatted disk (no partition).\n # @todo: This is a hack because LIST and STAT are not yet implemented\n # in TXS (get to this eventually)\n lstBlkDev = [ '/dev/sda', '/dev/sdb' ];\n for sBlkDev in lstBlkDev:\n fRc = oTxsSession.syncExec('/usr/bin/ls', ('ls', sBlkDev + '1'));\n if not fRc:\n lstDisks = [ sBlkDev ];\n break;\n\n _ = oSession;\n return lstDisks;"
] | [
"0.6811599",
"0.68013054",
"0.672062",
"0.657291",
"0.65141135",
"0.64920574",
"0.6439014",
"0.64347744",
"0.64077145",
"0.6335327",
"0.63346064",
"0.63235974",
"0.631117",
"0.62916005",
"0.627171",
"0.6241496",
"0.6225746",
"0.6225603",
"0.62238884",
"0.6167665",
"0.61405224",
"0.61305416",
"0.60951006",
"0.6086827",
"0.60560626",
"0.60404575",
"0.6040307",
"0.6015407",
"0.5997148",
"0.5986793"
] | 0.79499185 | 0 |
Returns all disksnapshots objects list in the given storage domain | def get_storage_domain_diskssnapshots_objects(storagedomain, get_href=False):
from art.rhevm_api.tests_lib.low_level.storagedomains import (
get_storage_domain_obj
)
storage_domain_object = get_storage_domain_obj(storagedomain)
return DISK_SNAPSHOT_API.getElemFromLink(
storage_domain_object,
link_name='disksnapshots',
attr='disk_snapshot',
get_href=get_href,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_storage_domains(cohesity_client):\n storage_domain_list = cohesity_client.view_boxes.get_view_boxes()\n for domain in storage_domain_list:\n exported_res_dict[\"Storage Domains\"].append(domain.name)\n return storage_domain_list",
"def get_snapshots(FIELDS='all'):\n snapinfostr = fork_and_get_output(\"zfs list -t snapshot -H -o {0}\".format(FIELDS).split())\n header = get_zfs_snap_header()\n snapinfo = snapinfostr.splitlines()\n snapobjs = []\n for snapstr in snapinfo:\n snapobjs.append(DataZFS(snapstr, header, 'snapshot'))\n return snapobjs",
"def list_(args):\n osf = _setup_osf(args)\n\n project = osf.project(args.project)\n\n for store in project.storages:\n prefix = store.name\n for file_ in store.files:\n path = file_.path\n if path.startswith('/'):\n path = path[1:]\n\n print(os.path.join(prefix, path))",
"def files_storage_list(self, prefix='pipeline/', print_paths=False):\n\n return self.backend.files_storage_list(prefix=prefix, print_paths=print_paths)",
"def get_snapshots(dataset=''):\n # filter my tags\n return os.listdir(dataset + ZFS_DEFAULT_SNAPSHOT_DIR)",
"def domain_object_list(self, domain, paths, cluster=None):\n\n q = (\"select n.path, v.serial, v.node, v.hash, \"\n \"v.size, v.type, v.source, v.mtime, v.muser, \"\n \"v.uuid, v.checksum, v.cluster, a.key, a.value \"\n \"from nodes n, versions v, attributes a \"\n \"where v.serial = a.serial and \"\n \"a.domain = ? and \"\n \"a.node = n.node and \"\n \"a.is_latest = 1 and \"\n \"n.path in (%s)\") % ','.join('?' for _ in paths)\n args = [domain]\n map(args.append, paths)\n if cluster is not None:\n q += \"and v.cluster = ?\"\n args += [cluster]\n\n self.execute(q, args)\n rows = self.fetchall()\n\n group_by = itemgetter(slice(12))\n rows.sort(key=group_by)\n groups = groupby(rows, group_by)\n return [(k[0], k[1:], dict([i[12:] for i in data])) for\n (k, data) in groups]",
"def list_objects(bucket=None):\n hook = GoogleCloudStorageHook()\n storage_objects = hook.list(bucket)\n\n return storage_objects",
"def all_objects(self) -> List[StorageObject]:\n return [item for item in self._store.values()]",
"def database_volume_snapshot_get_list():\n db = database_get()\n\n session = db.session()\n query = session.query(model.VolumeSnapshot)\n\n volume_snapshot_objs = list()\n for volume_snapshot in query.all():\n nfvi_volume_snapshot_data = \\\n json.loads(volume_snapshot.nfvi_volume_snapshot_data)\n nfvi_volume_snapshot = nfvi.objects.v1.VolumeSnapshot(\n nfvi_volume_snapshot_data['uuid'],\n nfvi_volume_snapshot_data['name'],\n nfvi_volume_snapshot_data['description'],\n nfvi_volume_snapshot_data['size_gb'],\n nfvi_volume_snapshot_data['volume_uuid'])\n volume_snapshot_obj = objects.VolumeSnapshot(nfvi_volume_snapshot)\n volume_snapshot_objs.append(volume_snapshot_obj)\n return volume_snapshot_objs",
"def get_snapshots(self, region):\n try:\n conn = ec2.connect_to_region(region, **self.credentials)\n region_snapshots = conn.get_all_snapshots(owner='self')\n except boto.exception.EC2ResponseError:\n return []\n return region_snapshots",
"def get_snapshots(self):\r\n ec2 = self.get_ec2_connection()\r\n rs = ec2.get_all_snapshots()\r\n all_vols = [self.volume_id] + self.past_volume_ids\r\n snaps = []\r\n for snapshot in rs:\r\n if snapshot.volume_id in all_vols:\r\n if snapshot.progress == '100%':\r\n snapshot.date = dateutil.parser.parse(snapshot.start_time)\r\n snapshot.keep = True\r\n snaps.append(snapshot)\r\n snaps.sort(cmp=lambda x,y: cmp(x.date, y.date))\r\n return snaps",
"def get_snapshots(self) -> SnapshotListing:\n return self.snapshots",
"def fs_get_disk_list(self):\n\t\treturn Job(SDK.PrlSrv_FsGetDiskList(self.handle)[0])",
"def find_all():\n return ItopapiPrototype.find_all(ItopapiStorageSystem)",
"def snapshots(self, owner=None, restorable_by=None):\r\n rs = self.connection.get_all_snapshots(owner=owner,\r\n restorable_by=restorable_by)\r\n mine = []\r\n for snap in rs:\r\n if snap.volume_id == self.id:\r\n mine.append(snap)\r\n return mine",
"def list_snapshots(self, account_id=None, max_items=100):\n if not account_id:\n account_id = get_instance_identity_document()['accountId']\n paginator = self.__client.get_paginator('describe_snapshots')\n response = paginator.paginate(OwnerIds=[account_id], PaginationConfig={'MaxItems': max_items}) \\\n .build_full_result()\n\n return EBSSnapshotsList(response)",
"def get_all_disks():\n return DISKS_API.get(abs_link=False)",
"def GetFilesToBackup(domainXml):\n disks = root.findall(\"./devices/disk/source\")\n\n files = []\n for disk in disks:\n files.append(disk.get(\"file\"))\n\n return files",
"def get_objects(si, args):\n # Get datacenter object.\n datacenter_list = si.content.rootFolder.childEntity\n \"\"\"\n if args.datacenter_name:\n datacenter_obj = get_obj_in_list(args.datacenter_name, datacenter_list)\n else:\n \"\"\"\n datacenter_obj = datacenter_list[0]\n\n # Get datastore object.\n datastore_list = datacenter_obj.datastoreFolder.childEntity\n \"\"\"if args.datastore_name:\n datastore_obj = get_obj_in_list(args.datastore_name, datastore_list)\n elif len(datastore_list) > 0:\"\"\"\n datastore_obj = datastore_list[0]\n #else:\n # print \"No datastores found in DC (%s).\" % datacenter_obj.name\n\n # Get cluster object.\n cluster_list = datacenter_obj.hostFolder.childEntity\n \"\"\"if args.cluster_name:\n cluster_obj = get_obj_in_list(args.cluster_name, cluster_list)\n elif len(cluster_list) > 0:\"\"\"\n cluster_obj = cluster_list[0]\n #else:\n # print \"No clusters found in DC (%s).\" % datacenter_obj.name\n\n # Generate resource pool.\n resource_pool_obj = cluster_obj.resourcePool\n\n return {\"datacenter\": datacenter_obj,\n \"datastore\": datastore_obj\n ,\"resource pool\": resource_pool_obj}",
"def list_snapshots(self, detailed=True):\n aname = \"cinder_v%s.list_snapshots\" % self.version\n with atomic.ActionTimer(self, aname):\n return (self._get_client()\n .volume_snapshots.list(detailed))",
"def list_blobs(self, prefix=''):\n return [b.name for b in self.bucket.list_blobs(prefix=prefix)]",
"def storage_pool_get_all(context, marker=None, limit=None, sort_keys=None,\n sort_dirs=None, filters=None, offset=None):\n session = get_session()\n with session.begin():\n # Generate the query\n query = _generate_paginate_query(context, session, models.StoragePool,\n marker, limit, sort_keys, sort_dirs,\n filters, offset,\n )\n # No storage_pool would match, return empty list\n if query is None:\n return []\n return query.all()",
"def test_filesystem_can_list_contents_of_domain(self):\n self.index.photos_unique_captures_of_domain = MagicMock(return_value=[\n '2019-01-13H20:00',\n '2019-01-13H21:00',\n '2019-01-13H22:00',\n ])\n\n expected = [\n Directory('.'),\n Directory('..'),\n Directory('2019-01-13H20:00'),\n Directory('2019-01-13H21:00'),\n Directory('2019-01-13H22:00'),\n Directory(LastCapture.FILENAME),\n ]\n\n files = self.filesystem._list('/example.com')\n self.assertListOfFilesEqual(expected, files)\n\n files = self.filesystem._list('/example.com/')\n self.assertListOfFilesEqual(expected, files)\n\n self.index.photos_unique_captures_of_domain.assert_called_with(\n 'example.com',\n self.refresh_rate\n )",
"def list_objects(self, s3_prefix_path):\n bucket_name, prefix = S3Util.get_bucket_and_key(s3_prefix_path)\n bucket = self.s3_resource.Bucket(bucket_name)\n return [\"s3://\" + bucket_name + \"/\" + key.key for key in bucket.objects.filter(Prefix=prefix)]",
"def getListOfSDs(name = None,selector = None):\n sds = rhevGet(\"/api/datacenters/%s/storagedomains\"%getDcData(rhev_settings.DC,\"id\"))\n doc = libxml2.parseDoc(sds)\n ctxt = doc.xpathNewContext()\n res = ctxt.xpathEval(\"/storage_domains/storage_domain\")\n sdlist = []\n for sd in res:\n sdin = {}\n sdin[\"name\"] = sd.firstElementChild().get_content()\n sdin[\"id\"] = sd.prop(\"id\")\n sdlist.append(sdin)\n result = []\n if name:\n result = [sdin for sdin in sdlist if sdin[\"name\"].find(name) != -1]\n return result or sdlist",
"def get_disks():\n disks = []\n\n try:\n # Run script\n result = run_diskpart(['list disk'])\n except subprocess.CalledProcessError:\n pass\n else:\n # Append disk numbers\n output = result.stdout.decode().strip()\n for tmp in re.findall(r'Disk (\\d+)\\s+\\w+\\s+(\\d+\\s+\\w+)', output):\n num = tmp[0]\n size = human_readable_size(tmp[1])\n disks.append({'Number': num, 'Size': size})\n\n return disks",
"def get_snapshots(self):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/snapshots\"\n\n response = self.connector.http_call(\"get\", _url)\n self.snapshots = response.json()",
"def get_all_disk():\n\t\tdisks = []\n\t\tdisks_lines = linux.exe_shell(\"lsblk -o NAME,VENDOR|grep -P '^sd.*[A-Z]'\")\n\t\tfor line in disks_lines.splitlines():\n\t\t\tdisk_t = line.split()\n\t\t\tif len(disk_t) > 1 and \"LSI\" not in disk_t[1]:\n\t\t\t\tdisks.append(disk_t[0])\n\t\tds = []\n\t\tfor i in disks:\n\t\t\td_t = DiskFromLsiSas3(\"\", i)\n\t\t\td_t.fill_attrs()\n\t\t\tds.append(d_t)\n\t\treturn ds",
"def get_ceph_disk():\n disks = []\n for srv in get_srv_list():\n cfg = get_srv_config(srv)\n for key in ['osd_data', 'osd_journal', 'mds_data', 'mon_data']:\n mnt_point = cfg[key]\n disk = get_disk_by_mountpoint(find_mount_point(mnt_point))\n if disk not in disks:\n disks.append(disk)\n return disks",
"def getVolumesD(region):\n volumes = getVolumes(region)\n instances = getInstancesD(region)\n\n volumesDicts = []\n for v in volumesDicts:\n volumesDict = {\"id\": v.id,\n \"KEEP-tag\": getKeepTag(v),\n \"instance_KEEP-tag\": getKeepTag(getInstanceOf(v)),\n \"instance\": v.attach_data.instance_id,\n \"status\": v.status,\n \"size\": v.size,\n \"create-time\": v.create_time,\n \"region\": v.region.name,\n \"zone\": v.zone,\n \"snapshot_id\": v.snapshot_id,\n \"PROD\": isProduction(v)\n }"
] | [
"0.6704098",
"0.6653081",
"0.6420554",
"0.6378387",
"0.62653655",
"0.61300075",
"0.6005906",
"0.59770525",
"0.5968691",
"0.59580076",
"0.5947921",
"0.5879047",
"0.5876216",
"0.5772094",
"0.57576305",
"0.57467604",
"0.57035834",
"0.5662695",
"0.5657524",
"0.56540245",
"0.56525934",
"0.5644304",
"0.5624237",
"0.5622725",
"0.5600885",
"0.55959284",
"0.55906576",
"0.55876774",
"0.55866516",
"0.557305"
] | 0.7705549 | 0 |
Check if certain disk is attached to VM as Read Only | def get_read_only(vm_name, disk_id):
return get_disk_attachment(vm_name, disk_id).get_read_only() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_mounted_system(self):\n res = self.su_cmd('touch /system/.dwarf_check')\n if res == '':\n res = self._do_adb_command('shell ls -la /system')\n if '.dwarf_check' in res:\n res = self.su_cmd('rm /system/.dwarf_check')\n if res == '':\n return True\n elif res == 'Read-only file system':\n return False\n\n return False",
"def disk_iops_read_only(self) -> Optional[float]:\n return pulumi.get(self, \"disk_iops_read_only\")",
"def storage_can_read(self):\n return True",
"def rw_active(self):\n result = False\n for disk in self.disks:\n result = disk.rw_active() or result\n return result",
"def verify_blob_permissions(self, blob):\n path = self.csum_to_path(blob)\n return is_readonly(path)",
"def _is_volume_mapped(self):\n standard_inquiry = self.device.get_scsi_standard_inquiry()\n # spc4r30 section 6.4.2 tables 140 + 141, peripheral device type 0 is disk, 31 is unknown or no device\n return standard_inquiry.peripheral_device.type == 0",
"def disk_is_valid(dhandle):\n if is_64bits:\n return dhandle.value != c_uint64(0).value\n else:\n return dhandle.value != c_uint32(0).value",
"def update_readonly_flag(self, volume, read_only):\n aname = \"cinder_v%s.update_readonly_flag\" % self.version\n with atomic.ActionTimer(self, aname):\n return self._get_client().volumes.update_readonly_flag(\n volume, read_only)",
"def check_disk_visibility(disk, disks_list):\n is_visible = disk in [disk_obj.get_alias() for disk_obj in disks_list]\n return is_visible",
"def read_only_root_filesystem(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only_root_filesystem\")",
"def read_only_root_filesystem(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"read_only_root_filesystem\")",
"def isReadOnly(self) -> bool:\n ...",
"def isReadOnly(self) -> bool:\n ...",
"def is_partition(disk): #TODO: Could change to use \"Whole\" attrib. Good idea?\n\n return \"s\" in disk.split(\"disk\")[1]",
"def is_mounted(self):\n try:\n _ = openmediavault.subprocess.check_output(\n [\n 'findmnt',\n '--canonicalize',\n '--first-only',\n '--noheadings',\n '--raw',\n '--nofsroot',\n self.canonical_device_file,\n ]\n )\n return True\n except subprocess.CalledProcessError:\n pass\n return False",
"def _VerifyDiskModification(self, op, params, excl_stor, group_access_types):\n disk_type = params.get(\n constants.IDISK_TYPE,\n self.cfg.GetInstanceDiskTemplate(self.instance.uuid))\n\n if op == constants.DDM_ADD:\n params[constants.IDISK_TYPE] = disk_type\n\n if disk_type == constants.DT_DISKLESS:\n raise errors.OpPrereqError(\n \"Must specify disk type on diskless instance\", errors.ECODE_INVAL)\n\n if disk_type != constants.DT_EXT:\n utils.ForceDictType(params, constants.IDISK_PARAMS_TYPES)\n\n mode = params.setdefault(constants.IDISK_MODE, constants.DISK_RDWR)\n if mode not in constants.DISK_ACCESS_SET:\n raise errors.OpPrereqError(\"Invalid disk access mode '%s'\" % mode,\n errors.ECODE_INVAL)\n\n size = params.get(constants.IDISK_SIZE, None)\n if size is None:\n raise errors.OpPrereqError(\"Required disk parameter '%s' missing\" %\n constants.IDISK_SIZE, errors.ECODE_INVAL)\n size = int(size)\n\n params[constants.IDISK_SIZE] = size\n name = params.get(constants.IDISK_NAME, None)\n if name is not None and name.lower() == constants.VALUE_NONE:\n params[constants.IDISK_NAME] = None\n\n # These checks are necessary when adding and attaching disks\n if op in (constants.DDM_ADD, constants.DDM_ATTACH):\n CheckSpindlesExclusiveStorage(params, excl_stor, True)\n # If the disk is added we need to check for ext provider\n if op == constants.DDM_ADD:\n CheckDiskExtProvider(params, disk_type)\n\n # Make sure we do not add syncing disks to instances with inactive disks\n if not self.op.wait_for_sync and not self.instance.disks_active:\n raise errors.OpPrereqError(\"Can't %s a disk to an instance with\"\n \" deactivated disks and --no-wait-for-sync\"\n \" given\" % op, errors.ECODE_INVAL)\n\n # Check disk access param (only for specific disks)\n if disk_type in constants.DTS_HAVE_ACCESS:\n access_type = params.get(constants.IDISK_ACCESS,\n group_access_types[disk_type])\n if not IsValidDiskAccessModeCombination(self.instance.hypervisor,\n disk_type, access_type):\n raise errors.OpPrereqError(\"Selected hypervisor (%s) cannot be\"\n \" used with %s disk access param\" %\n (self.instance.hypervisor, access_type),\n errors.ECODE_STATE)\n\n if op == constants.DDM_ATTACH:\n if len(params) != 1 or ('uuid' not in params and\n constants.IDISK_NAME not in params):\n raise errors.OpPrereqError(\"Only one argument is permitted in %s op,\"\n \" either %s or uuid\" % (constants.DDM_ATTACH,\n constants.IDISK_NAME,\n ),\n errors.ECODE_INVAL)\n self._CheckAttachDisk(params)\n\n elif op == constants.DDM_MODIFY:\n if constants.IDISK_SIZE in params:\n raise errors.OpPrereqError(\"Disk size change not possible, use\"\n \" grow-disk\", errors.ECODE_INVAL)\n\n disk_info = self.cfg.GetInstanceDisks(self.instance.uuid)\n\n # Disk modification supports changing only the disk name and mode.\n # Changing arbitrary parameters is allowed only for ext disk template\",\n if not utils.AllDiskOfType(disk_info, [constants.DT_EXT]):\n utils.ForceDictType(params, constants.MODIFIABLE_IDISK_PARAMS_TYPES)\n else:\n # We have to check that the 'access' and 'disk_provider' parameters\n # cannot be modified\n for param in [constants.IDISK_ACCESS, constants.IDISK_PROVIDER]:\n if param in params:\n raise errors.OpPrereqError(\"Disk '%s' parameter change is\"\n \" not possible\" % param,\n errors.ECODE_INVAL)\n\n name = params.get(constants.IDISK_NAME, None)\n if name is not None and name.lower() == constants.VALUE_NONE:\n params[constants.IDISK_NAME] = None\n\n if op == constants.DDM_REMOVE and not self.op.hotplug:\n 
CheckInstanceState(self, self.instance, INSTANCE_NOT_RUNNING,\n msg=\"can't remove volume from a running instance\"\n \" without using hotplug\")",
"def disk_m_bps_read_only(self) -> Optional[float]:\n return pulumi.get(self, \"disk_m_bps_read_only\")",
"def is_read_only(self):\n\t\treturn bool(call_sdk_function('PrlShare_IsReadOnly', self.handle))",
"def _is_booted_from_volume(self, instance, disk_mapping=None):\n return not bool(instance.get('image_ref'))",
"def read_only(self):\n return bool(self.__read_only)",
"def _is_disk_checking_required(cls, node):\n if (node.status in (consts.NODE_STATUSES.ready,\n consts.NODE_STATUSES.deploying,\n consts.NODE_STATUSES.provisioned) or\n (node.status == consts.NODE_STATUSES.error and\n node.error_type != consts.NODE_ERRORS.provision)):\n return False\n\n return True",
"def is_read_only(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"is_read_only\")",
"def check_vault_access(self, did, access_vault=None):\n info = self.get_vault_service(did)\n if not info:\n raise VaultNotFoundException()\n\n # INFO: no need check permission.\n # if (access_vault == VAULT_ACCESS_WR or access_vault == VAULT_ACCESS_DEL) \\\n # and info[VAULT_SERVICE_STATE] == VAULT_SERVICE_STATE_FREEZE:\n # raise ForbiddenException(msg=\"The vault can't be written.\")",
"def is_read_only(self):\n return (self.get_name().startswith(\"b\")\n or self.get_name() == \"jump_cond\" # meta-instruction\n or self.get_name() == \"j\"\n or self.get_name() == \"ld\"\n or self.get_name() == \"lw\"\n or self.get_name() == \"lb\")",
"def update_readonly_flag(self, volume, read_only):\n return self._impl.update_readonly_flag(volume, read_only=read_only)",
"def is_writable(self, offset):\n self.ret = bool(idaapi.getseg(offset).perm & idaapi.SEGPERM_WRITE)\n return self.ret",
"def is_booted_storage_device(disk):\n cmdline = (\"grep -w /ahcexport /proc/mounts | cut -d ' ' -f 1 | \"\n \"sed -e 's/[0-9]*//g'\")\n if '/dev/' not in disk:\n disk = '/dev/%s' % disk\n grep_cmd = subprocess.Popen(cmdline,\n shell=True, stdout=subprocess.PIPE)\n for booted_disk in grep_cmd.stdout:\n booted_disk = booted_disk.decode(errors='ignore')\n booted_disk = booted_disk.rstrip('\\n').strip()\n if booted_disk == disk:\n return True\n return False",
"def check_disk_usage(disk):\n du = shutil.disk_usage(disk)\n free = du.free / du.total * 100\n return free > 20",
"def testIsLocked(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_CS, parent=self._gpt_path_spec,\n volume_index=0)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n\n self.assertIsNotNone(file_entry)\n self.assertTrue(file_entry.IsLocked())",
"def check_perms(resource):\r\n stmode = os.stat(resource).st_mode\r\n return (getattr(stat, 'S_IROTH') & stmode) > 0"
] | [
"0.68377024",
"0.6680292",
"0.6422407",
"0.6302423",
"0.5968654",
"0.59454435",
"0.59246737",
"0.5899567",
"0.58853364",
"0.5862699",
"0.5862699",
"0.58550006",
"0.58550006",
"0.5846145",
"0.58454525",
"0.5780733",
"0.57160926",
"0.56951934",
"0.5689234",
"0.5687709",
"0.566707",
"0.56657183",
"0.5656131",
"0.5655927",
"0.5644349",
"0.5623069",
"0.5597421",
"0.55795693",
"0.5572001",
"0.5569789"
] | 0.7584012 | 0 |
Wait for an event of successful/failed sparsify event starting from the last start sparsify event in the system. | def wait_for_sparsify_event(disk_id, success=True):
import art.rhevm_api.tests_lib.low_level.events as ll_events
disk_name = get_disk_obj(disk_alias=disk_id, attribute='id').get_name()
start_sparsify_query = "\"Started to sparsify %s\"" % disk_name
finished_sparsify_query = (
"%s sparsified successfully" % disk_name if success else
"Failed to sparsify %s" % disk_name
)
last_event_id = ll_events.get_max_event_id(start_sparsify_query)
return ll_events.wait_for_event(
query=finished_sparsify_query, start_id=last_event_id
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def waitUntilSuccess():",
"def wait_for_event(self, event):\n\n\t\tif event == 5:\n\t\t\toutcome = self.wait_for_saccade_start()\n\t\telif event == 6:\n\t\t\toutcome = self.wait_for_saccade_end()\n\t\telif event == 7:\n\t\t\toutcome = self.wait_for_fixation_start()\n\t\telif event == 8:\n\t\t\toutcome = self.wait_for_fixation_end()\n\t\telif event == 3:\n\t\t\toutcome = self.wait_for_blink_start()\n\t\telif event == 4:\n\t\t\toutcome = self.wait_for_blink_end()\n\t\telse:\n\t\t\traise Exception(\"Error in libsmi.SMItracker.wait_for_event: eventcode %s is not supported\" % event)\n\n\t\treturn outcome",
"def wait_on(self, event_stream: IO[str]) -> None:\n # The first event is expected to be socket creation\n initial_event = _parse_server_event(event_stream.readline().strip())\n if isinstance(initial_event, SocketCreated):\n if not self.wait_on_initialization:\n return\n\n # The second event is expected to be server initialization\n second_event = _parse_server_event(event_stream.readline().strip())\n if isinstance(second_event, ServerInitialized):\n return\n\n raise EventParsingException(\n f\"Unexpected second server status update: {second_event}\"\n )\n\n raise EventParsingException(\n f\"Unexpected initial server status update: {initial_event}\"\n )",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def waitUntilFinished():",
"def waitUntilFinished():",
"async def async_wait_on(\n self, event_stream: async_server_connection.TextReader\n ) -> None:\n initial_event = _parse_server_event((await event_stream.readline()).strip())\n if isinstance(initial_event, SocketCreated):\n if not self.wait_on_initialization:\n return\n\n second_event = _parse_server_event((await event_stream.readline()).strip())\n if isinstance(second_event, ServerInitialized):\n return\n\n raise EventParsingException(\n f\"Unexpected second server status update: {second_event}\"\n )\n\n raise EventParsingException(\n f\"Unexpected initial server status update: {initial_event}\"\n )",
"def _wait_for_all_operations_done(self):\n while self._test_names_to_processes:\n time.sleep(10)\n running_test_names = list(self._test_names_to_processes.keys())\n for test_name in running_test_names:\n running_proc = self._test_names_to_processes.get(test_name)\n return_code = running_proc.poll()\n if return_code is not None:\n test_case_state = self._test_names_to_test_states.get(test_name)\n self._handle_failure(running_proc, test_case_state.running_test)\n del self._test_names_to_processes[test_name]\n print('Started validating: {}'.format(test_name))\n test_case_state.running_test.validate_result()\n self._run_test(test_case_state.remaining_tests)",
"async def do_wait(self) -> None:\n async with self.running_wait.needs_run() as needs_run:\n if needs_run:\n for number in self.pending_remove:\n del self.number_to_cb[number]\n self.pending_remove = set()\n maxevents = 32\n if self.input_buf is None:\n self.input_buf = await self.ram.malloc(EpollEventList, maxevents * EpollEvent.sizeof())\n if self.syscall_response is None:\n if self.wait_readable:\n await self.wait_readable()\n self.syscall_response = await self.epfd.task.sysif.submit_syscall(\n SYS.epoll_wait, self.epfd.near, self.input_buf.near, maxevents, self.timeout)\n if self.valid_events_buf is None:\n count = await self.syscall_response.receive()\n self.valid_events_buf, _ = self.input_buf.split(count * EpollEvent.sizeof())\n received_events = await self.valid_events_buf.read()\n self.input_buf = None\n self.valid_events_buf = None\n self.syscall_response = None\n for event in received_events:\n if event.data not in self.pending_remove:\n self.number_to_cb[event.data](event.events)",
"def wait_for_enforce_security_event(self, expected_enforce_security_event):\n pass",
"def wait(self):\n while not self.done:\n self.device._handle_events(1000)",
"def wait_finish(self):\r\n self.proc.join()",
"def wait_start_success(self) -> None:\n try:\n if self.uses_before_pod is not None:\n self.uses_before_pod.wait_start_success()\n if self.uses_after_pod is not None:\n self.uses_after_pod.wait_start_success()\n if self.head_pod is not None:\n self.head_pod.wait_start_success()\n if self.gateway_pod is not None:\n self.gateway_pod.wait_start_success()\n for shard_id in self.shards:\n self.shards[shard_id].wait_start_success()\n except:\n self.close()\n raise",
"def _checkpoint(self,):\n self.outstanding.wait()",
"def wait(self):\n\t\traise NotImplementedError(\"must be redeclared\")",
"def _wait(self,):\n #modlogger.debug( \"%s: waiting\"%self)\n self.closing = True\n with self.not_complete_lock:\n if not self.not_complete: return\n self._checkpoint()",
"def wait_for_complete(self, valid_final_states):\n # BUG: if itself is valid state, then returns immediately\n rr = rospy.Rate(1.0)\n # print \"valid states here\", valid_final_states\n #\n done = False\n preempted = False\n success = True\n while not done and not rospy.is_shutdown():\n if not self._as.is_active():\n preempted = True\n break\n done = self.check_status(valid_final_states)\n # if done:\n # print \"Reached a valid final state\"\n # else:\n # print \"following final states not reached\"\n # print valid_final_states\n done = done or self._fsm_aborted\n rr.sleep()\n if self._fsm_aborted:\n # reset abort status\n success = False\n self._fsm_aborted = False\n return success, preempted",
"def wait(self):\n self.event.wait()",
"async def wait_until_done(self) -> None:\n ...",
"def wait(self):\n time.sleep(self.next())",
"def _wait_for_completion(self):\n if self.do_timing:\n self.timer.start(\"Running.\")\n\n while self.state != State.COMPLETED:\n self._update_state()\n\n if self.do_timing:\n self.timer.stop()",
"def espera_ped(self):\n self.e_esperar.wait()\n self.e_esperar.clear()",
"def _wait(self):\n conn = None\n try:\n conn = libvirt.open(\"qemu:///system\")\n while True:\n time.sleep(10)\n try:\n state = conn.lookupByName(self.domain).info()[0]\n except (libvirt.libvirtError, TypeError, IndexError):\n break\n if state in [4, 5, 6]: # crashed or shutdown\n break\n finally:\n if conn is not None:\n conn.close()",
"def wait(self: AutoScaler) -> AutoScalerState:\n if self.phase is AutoScalerPhase.STEADY:\n waited = datetime.now() - self.last_check\n if waited > self.wait_check:\n return AutoScalerState.CHECK\n else:\n log.trace(f'Autoscale wait ({timedelta(seconds=round(waited.total_seconds()))})')\n time.sleep(1)\n return AutoScalerState.WAIT\n else:\n return AutoScalerState.FINAL",
"def wait(self):\n pass",
"def wait(self):\n pass",
"def wait():\n pass",
"def wait_until_finished(self):\n for processor in self._processors.values():\n while not processor.done:\n time.sleep(0.1)",
"def wait_for_fixation_end(self):\n\n\t\t# # # # #\n\t\t# EyeTribe method\n\n\t\tif self.eventdetection == 'native':\n\t\t\t\n\t\t\t# print warning, since EyeTribe does not have a blink detection\n\t\t\t# built into their API\n\t\t\t\n\t\t\tprint(\"WARNING! 'native' event detection has been selected, \\\n\t\t\t\tbut EyeTribe does not offer fixation detection; \\\n\t\t\t\tPyGaze algorithm will be used\")\n\n\t\t# # # # #\n\t\t# PyGaze method\n\t\t\t\n\t\t# function assumes that a 'fixation' has ended when a deviation of more than fixtresh\n\t\t# from the initial 'fixation' position has been detected\n\t\t\n\t\t# get starting time and position\n\t\tstime, spos = self.wait_for_fixation_start()\n\t\t\n\t\t# loop until fixation has ended\n\t\twhile True:\n\t\t\t# get new sample\n\t\t\tnpos = self.sample() # get newest sample\n\t\t\t# check if sample is valid\n\t\t\tif self.is_valid_sample(npos):\n\t\t\t\t# check if sample deviates to much from starting position\n\t\t\t\tif (npos[0]-spos[0])**2 + (npos[1]-spos[1])**2 > self.pxfixtresh**2: # Pythagoras\n\t\t\t\t\t# break loop if deviation is too high\n\t\t\t\t\tbreak\n\n\t\treturn clock.get_time(), spos"
] | [
"0.61228937",
"0.57435817",
"0.5658563",
"0.55395555",
"0.55395555",
"0.55395555",
"0.55395555",
"0.5474974",
"0.54738116",
"0.5424017",
"0.5409089",
"0.52819955",
"0.52727515",
"0.52532136",
"0.52388525",
"0.52307796",
"0.5214139",
"0.52131623",
"0.5210483",
"0.52062255",
"0.51820964",
"0.5159536",
"0.51296973",
"0.5116232",
"0.51002413",
"0.5082142",
"0.5082142",
"0.50786376",
"0.5073389",
"0.506334"
] | 0.7065584 | 0 |
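The record above pairs a lookup of the last start event with a wait for the matching completion event. The sketch below reproduces that pattern in a self-contained form; the in-memory event log and the get_max_event_id/wait_for_event signatures are illustrative assumptions, not the real art.rhevm_api ll_events helpers.

import time


def get_max_event_id(events, query):
    # Highest event id whose message contains the query string, 0 if none match.
    matching = [eid for eid, msg in events if query in msg]
    return max(matching) if matching else 0


def wait_for_event(events_source, query, start_id, timeout=60, interval=1):
    # Poll the event source until an event newer than start_id matches the query.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if any(eid > start_id and query in msg for eid, msg in events_source()):
            return True
        time.sleep(interval)
    return False


if __name__ == "__main__":
    log = [(1, "Started to sparsify disk_1"), (2, "disk_1 sparsified successfully")]
    start_id = get_max_event_id(log, "Started to sparsify disk_1")
    print(wait_for_event(lambda: log, "sparsified successfully", start_id, timeout=2))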
Invoke sparsify action on disk. | def sparsify_disk(disk_id, storage_domain_name, wait=True):
if not do_disk_action(
'sparsify', disk_id=disk_id, target_domain=storage_domain_name,
wait=wait
):
return False
return wait_for_sparsify_event(disk_id) if wait else True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sparsify_model(path_to_model, sparsified_model_dump_path):\n sparsity_levels = [sl / 10 for sl in range(0, 10)]\n sparsity_levels += [0.91, 0.92, 0.93, 0.94, 0.95, 0.96, 0.97, 0.98, 0.99, 1.0]\n\n norms = [\"L1\", \"L2\"]\n sparse_block_shapes = [(1, 1), (1, 4)]\n\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n\n print(\"Running for sparsity levels - \", sparsity_levels)\n print(\"Running for sparse block shapes - \", sparse_block_shapes)\n print(\"Running for norms - \", norms)\n\n orig_model = get_dlrm_model()\n saved_state = torch.load(path_to_model, map_location=device)\n orig_model.load_state_dict(saved_state['state_dict'])\n\n orig_model = orig_model.to(device)\n step_time_dict = {}\n\n stat_dict: Dict[str, List] = {'norm': [], 'sparse_block_shape': [], 'sparsity_level': [],\n 'step_time_sec': [], 'zip_file_size': [], 'path': []}\n for norm in norms:\n for sbs in sparse_block_shapes:\n if norm == \"L2\" and sbs == (1, 1):\n continue\n for sl in sparsity_levels:\n model = copy.deepcopy(orig_model)\n sparsifier = create_attach_sparsifier(model, sparse_block_shape=sbs, norm=norm, sparsity_level=sl)\n\n t1 = time.time()\n sparsifier.step()\n t2 = time.time()\n\n step_time = t2 - t1\n norm_sl = f\"{norm}_{sbs}_{sl}\"\n print(f\"Step Time for {norm_sl}=: {step_time} s\")\n\n step_time_dict[norm_sl] = step_time\n\n sparsifier.squash_mask()\n\n saved_state['state_dict'] = model.state_dict()\n file_name = f'criteo_model_norm={norm}_sl={sl}.ckpt'\n state_path, file_size = save_model_states(saved_state, sparsified_model_dump_path, file_name, sbs, norm=norm)\n\n stat_dict['norm'].append(norm)\n stat_dict['sparse_block_shape'].append(sbs)\n stat_dict['sparsity_level'].append(sl)\n stat_dict['step_time_sec'].append(step_time)\n stat_dict['zip_file_size'].append(file_size)\n stat_dict['path'].append(state_path)\n\n df = pd.DataFrame(stat_dict)\n filename = 'sparse_model_metadata.csv'\n df.to_csv(filename, index=False)\n\n print(f\"Saved sparsified metadata file in {filename}\")",
"def sparsify(f, arg_types, sparse_rules=None):\n os.environ[\"STREE_PYTHON_FALLBACK\"] = \"1\"\n tree = SymbolTree.create(f)\n handler = tree.get_handler()\n sparse_rules = sparse_rules or {}\n sparsify_tree(handler, arg_types, sparse_rules, f)\n os.unsetenv(\"STREE_PYTHON_FALLBACK\")\n return tree.get_network()",
"def sparsify(self, state):\n print(\"running L0 projection-based (unstructured) sparsification. \\n \")\n model = state.model\n masks = self.get_masks(model)\n self.apply_masks(model, masks)",
"def convert_sparse(s, f, dry_run=ARGS.dry_run):\n result = None\n cmd = ['/usr/bin/hdiutil', 'convert', '-ov', '-quiet', str(s), '-format', 'UDZO', '-o', str(f)]\n\n if not dry_run:\n LOG.info('Converting {sparseimage}'.format(sparseimage=s))\n # Eject first\n eject(silent=True)\n _p = subprocess.run(cmd, capture_output=True, encoding='utf-8')\n LOG.debug('{cmd} ({returncode})'.format(cmd=' '.join([str(x) for x in cmd]), returncode=_p.returncode))\n\n if _p.returncode == 0:\n LOG.info('Created {dmg}'.format(dmg=f))\n result = Path(f)\n else:\n LOG.info(_p.stderr.strip())\n\n return result",
"def wait_for_sparsify_event(disk_id, success=True):\n import art.rhevm_api.tests_lib.low_level.events as ll_events\n disk_name = get_disk_obj(disk_alias=disk_id, attribute='id').get_name()\n start_sparsify_query = \"\\\"Started to sparsify %s\\\"\" % disk_name\n finished_sparsify_query = (\n \"%s sparsified successfully\" % disk_name if success else\n \"Failed to sparsify %s\" % disk_name\n )\n last_event_id = ll_events.get_max_event_id(start_sparsify_query)\n return ll_events.wait_for_event(\n query=finished_sparsify_query, start_id=last_event_id\n )",
"def submission():\n\n # @ToDo: Something better than this crude check\n if not auth.s3_logged_in():\n auth.permission.fail()\n\n from io import StringIO\n import cgi\n from lxml import etree\n\n source = request.post_vars.get(\"xml_submission_file\", None)\n if isinstance(source, cgi.FieldStorage):\n if source.filename:\n xmlinput = source.file\n else:\n xmlinput = source.value\n\n if isinstance(xmlinput, str):\n xmlinput = StringIO(xmlinput)\n elif request.env.request_method == \"HEAD\":\n raise HTTP(204)\n else:\n raise HTTP(400, \"Invalid Request: Expected an XForm\")\n\n tree = etree.parse(xmlinput)\n tablename = tree.getroot().tag\n\n resource = s3db.resource(tablename)\n\n stylesheet = os.path.join(request.folder, \"static\", \"formats\", \"odk\",\n \"import.xsl\")\n\n try:\n result = resource.import_xml(source=tree, stylesheet=stylesheet)\n except (IOError, SyntaxError):\n raise HTTP(500, \"Internal server error\")\n\n # Parse response\n status = json.loads(result)[\"statuscode\"]\n\n if status == \"200\":\n r = HTTP(201, \"Saved\") # ODK Collect only accepts 201\n r.headers[\"Location\"] = request.env.http_host\n raise r\n else:\n raise HTTP(status, result)",
"def runfile(self, s):\n return self.shell.ex(load_wrap(s, attach=False))",
"def gen(self, sid):\n\n\t\tcharacters = self.parser.parse(self.cm.get_booknlp_fpath(sid))\n\t\tself.parser.save(characters, self.get_fpath(sid))",
"def dispatch(self, filename):\n\n parser = self.find_parser(filename)\n if parser:\n parser.tell({\n 'command': 'parse',\n 'filename': filename\n })\n else:\n log.info('No parser for filename: {}'.format(filename))",
"def run(self):\n self.cancelled = False\n # get version from IDF object or by parsing the IDF file for it\n\n # Move files into place\n self.epw = self.idf.epw.copy(self.run_dir / \"in.epw\").expand()\n self.idfname = Path(self.idf.savecopy(self.run_dir / \"in.idf\")).expand()\n self.idd = self.idf.iddname.copy(self.run_dir).expand()\n\n # Get executable using shutil.which (determines the extension based on\n # the platform, eg: .exe. And copy the executable to tmp\n slab_exe = shutil.which(\"Slab\", path=self.eplus_home)\n if slab_exe is None:\n log(\n f\"The Slab program could not be found at \" f\"'{self.eplus_home}'\",\n lg.WARNING,\n )\n return\n self.slabexe = Path(slab_exe).copy(self.run_dir)\n self.slabidd = (self.eplus_home / \"SlabGHT.idd\").copy(self.run_dir)\n\n # The GHTin.idf file is copied from the self.include list (added by\n # ExpandObjects. If self.include is empty, no need to run Slab.\n self.include = [Path(file).copy(self.run_dir) for file in self.idf.include]\n if not self.include:\n self.cleanup_callback()\n return\n\n # Run Slab Program\n with logging_redirect_tqdm(loggers=[lg.getLogger(self.idf.name)]):\n with tqdm(\n unit_scale=True,\n miniters=1,\n desc=f\"RunSlab #{self.idf.position}-{self.idf.name}\",\n position=self.idf.position,\n ) as progress:\n\n self.p = subprocess.Popen(\n self.cmd,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n shell=True, # can use shell\n cwd=self.run_dir.abspath(),\n )\n start_time = time.time()\n self.msg_callback(\"Begin Slab Temperature Calculation processing . . .\")\n for line in self.p.stdout:\n self.msg_callback(line.decode(\"utf-8\").strip(\"\\n\"))\n progress.update()\n\n # We explicitly close stdout\n self.p.stdout.close()\n\n # Wait for process to complete\n self.p.wait()\n\n # Communicate callbacks\n if self.cancelled:\n self.msg_callback(\"RunSlab cancelled\")\n # self.cancelled_callback(self.std_out, self.std_err)\n else:\n if self.p.returncode == 0:\n self.msg_callback(\n \"RunSlab completed in {:,.2f} seconds\".format(\n time.time() - start_time\n )\n )\n self.success_callback()\n for line in self.p.stderr:\n self.msg_callback(line.decode(\"utf-8\"))\n else:\n self.msg_callback(\"RunSlab failed\")\n self.failure_callback()",
"def dispatch():\n parser = argparse.ArgumentParser(\n description=\"Used for managing s3 bucket of assets,\"\n \" primarily Python Cheeseshop management.\")\n subparsers = parser.add_subparsers()\n\n # ... push [-h] key file\n sub = subparsers.add_parser(\"push\",\n help=\"Deploy file to bucket, assigning it the\"\n \" specified key. Overwrites any existing item\"\n \" with the same key.\")\n sub.set_defaults(func=push_to_s3)\n sub.add_argument('key')\n sub.add_argument('file', type=file)\n\n # ... install [-h] [-r FILENAME] [-f]\n sub = subparsers.add_parser(\"install\",\n help=\"Update cheeseshop with packages,\"\n \" and update index.\")\n sub.set_defaults(func=upload_to_cheeseshop)\n sub.add_argument('packages', metavar=\"PACKAGE_NAMES\", nargs=\"*\",\n help=('each package is a package name'\n ' (Django or Django==1.3.1) or a'\n ' path to a file (dir/Django-1.3.1.tgz)'))\n sub.add_argument('-r', '--requirement', type=file, metavar=\"FILENAME\",\n help=('assure packages in requirements file are present'\n ', uploading from pypi as needed'))\n sub.add_argument('-U', '--upgrade', action='store_true',\n help='force upload, overwriting existing packages')\n\n # ... index\n sub = subparsers.add_parser(\"index\",\n help=\"Re-construct cheeseshop's index.\")\n sub.set_defaults(func=build_cheeseshop_index)\n\n kwargs = vars(parser.parse_args())\n f = kwargs.pop('func')\n f(**kwargs)",
"def segment_from_command_line(args):\n\n input_file = BedTool(args.input)\n # Segment the input file\n return segment(input_file, args.method, p0=args.p0, prior=args.prior)",
"def process_file(fname):\n # get the channel name\n # this is dirty code, come up with a better solution\n # e.g. storing the header info in the h5 file attrs\n man = SortingManagerGrouped(fname)\n try:\n entity = man.header['AcqEntName']\n except TypeError:\n entity = 'unknown'\n ncs_fname = os.path.basename(fname)[5:-3]\n print(ncs_fname)\n plot_fname = 'spikes_{}_{}'.format(entity, ncs_fname)\n save_fname = os.path.join(OVERVIEW, plot_fname)\n spikes_overview(ncs_fname, save_fname)",
"def run(self, parsed):",
"def cmd_stor(args):",
"def run(self):\n\n self.load_file()\n self.cat_to_num()\n self.split()",
"def run(self):\n\n\t\tfile = \"\"\n\t\tif self.args:\n\t\t\tfile = self.args[0]\n\n\t\ttry:\n\t\t\tif file:\n\t\t\t\tif self.doFile:\n\t\t\t\t\tcyphertxt = open(file,'r').read()\n\t\t\t\t\tcontents, meta = self.decrypt(cyphertxt)\n\t\t\t\telse:\n\t\t\t\t\tcontents, meta = self.get(file)\n\t\t\t\tprint contents\n\t\t\t\tprint \"### METADATA ###\"\n\t\t\t\tprint 'Name: %s\\tMode:%s\\tOwner=%s' % \\\n\t\t\t\t(meta['name'], meta['mode'], meta['owner'])\n\t\t\telse:\n\t\t\t\tfiles = self.find()\n\t\t\t\tfor file in sorted(files.keys()):\n\t\t\t\t\tif not self.getall:\n\t\t\t\t\t\tprint file\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tcontents, meta = self.get(file)\n\t\t\t\t\tself.write(contents, meta, files[file])\n\t\t\t\t\tprint \"Wrote:\", meta['name']\n\t\t\t\t\t# Support for post() function in the filter.\n\t\t\t\t\t# This function is run after the file is obtained\n\t\t\t\t\t# and written to disk.\n\t\t\t\t\t# Check to see if post() function exists in plugin\n\t\t\t\t\ttry:\n\t\t\t\t\t\tf = getattr(self.plugin, 'post')\n\t\t\t\t\t\tf()\n\t\t\t\t\t# If not, don't worry about it.\n\t\t\t\t\texcept AttributeError:\n\t\t\t\t\t\tpass\n\t\t\t\t\t# If it does exist, but something else goes wrong\n\t\t\t\t\t# complain to the admin about it.\n\t\t\t\t\texcept:\n\t\t\t\t\t\tsys.stderr.write(\"Could not complete 'post()' \" +\\\n\t\t\t\t\t\t\t\"for %s\\n\" % file)\n\t\t\t\t\t\tsys.stderr.write(\"%s: %s\\n\" \\\n\t\t\t\t\t\t\t% (sys.exc_info()[0], sys.exc_info()[1]))\n\t\texcept ValueError:\n\t\t\traise\n\t\t\t# Thrown by connect() called by get().\n\t\t\tsys.stderr.write(\"Error: I have no master servers, \"\n\t\t\t\t\"check %s.\\n\" % (self.config.getFile()))\n\t\t\tsys.exit(-1)\n\n\t\texcept Error411, msg:\n\t\t\tsys.stderr.write(\"Error: %s\\n\" % msg)\n\t\t\tsys.exit(-1)",
"def perform_step(file_contents, step):\n assert type(file_contents) is not bytes\n xmldoc = parse(file_contents)\n return step(xmldoc)",
"def execute(self):\n try:\n return self.spark.read.format(self.file_format).load(self.location)\n except AnalysisException as exp:\n raise",
"def analyze(filename: str, workers: int, count: int, svdk: int, save: bool, output: bool) -> None:\n documents, doccount = open_documents(filename, count, workers)\n print('Program Start. Loaded Data. Time Elapsed: {}\\n'.format(time.clock()))\n\n if len(documents) > 100:\n output = False\n\n if output:\n print(documents)\n\n words = get_unique_words(documents, workers)\n wordcount = len(words.keys())\n topwords = ','.join([w for w, s in sorted(words.items(),\n key=lambda tup: -tup[1]['freq'])[:20]])\n\n print(('Found Word Frequencies\\n'\n '\\n{} Documents (m) by {} Unique Words (n)\\n\\n'\n 'Top 20 Most Frequent Words:{}\\n'\n 'Time Elapsed: {}\\n').format(doccount,\n wordcount,\n topwords,\n time.clock()))\n\n if output:\n for word, freqs in words.items():\n print('{} => {}'.format(word, freqs))\n\n docmatrix, documents = get_sparse_matrix(documents, words, workers)\n print('Calculated Sparse Matrix\\nTime Elapsed: {}\\n'.format(time.clock()))\n\n if output:\n docs = docmatrix.T\n beforecomparisons = np.zeros((len(documents), len(documents)))\n for i in range(len(documents)):\n for j in range(len(documents)):\n beforecomparisons[i, j] = 1 - spatial.distance.cosine(docs[:, i].todense(), docs[:, j].todense())\n print(beforecomparisons)\n\n u, s, vt = decomposition(docmatrix, svdk)\n print('Calculated SVD Decomposition\\nTime Elapsed: {}'.format(time.clock()))\n\n if output:\n docs = np.diag(s) @ vt\n aftercomparisons = np.zeros((len(documents), len(documents)))\n for i in range(len(documents)):\n for j in range(len(documents)):\n aftercomparisons[i, j] = 1 - spatial.distance.cosine(docs[:, i], docs[:, j])\n print(aftercomparisons)\n\n # values = []\n # for i in documents:\n # values.append(doc_comparisons(u, s, vt, documents, output, docmatrix.todense().T))\n\n # with open('/Users/tp/dev/ml/articles/Python-LSA/output.txt', 'r') as file:\n # file.write(values)\n\n while True:\n try:\n selection = input('(w)ords or (d)ocuments? ').lower()\n if selection == 'w':\n matrix_comparison(u, s, vt, words, documents, output)\n elif selection == 'd':\n doc_comparisons(u, s, vt, documents, output, docmatrix.todense().T)\n elif selection == 'exit':\n break\n except (KeyboardInterrupt, EOFError):\n break",
"def _doParseSolution(self, st, stdout):\n raise Exception(\"Not implemented\")",
"def process(self):\n self.extract()\n self.transform()\n self.load()",
"def skesa_assemble(self):\n with progressbar(self.metadata) as bar:\n for sample in bar:\n # Initialise the assembly command\n sample.commands.assemble = str()\n try:\n if sample.general.trimmedcorrectedfastqfiles:\n # If the sample is a pure isolate, assemble it. Otherwise, run the pre-metagenome pipeline\n try:\n status = sample.run.Description\n except AttributeError:\n status = 'unknown'\n if status == 'metagenome':\n self.merge(sample)\n else:\n # Set the output directory\n sample.general.assembly_output = os.path.join(sample.general.outputdirectory,\n 'assembly_output')\n make_path(sample.general.assembly_output)\n sample.general.assemblyfile = os.path.join(sample.general.assembly_output,\n '{name}_unfiltered.fasta'\n .format(name=sample.name))\n sample.general.bestassemblyfile = os.path.join(sample.general.assembly_output,\n '{name}.fasta'\n .format(name=sample.name))\n fastqfiles = sample.general.trimmedcorrectedfastqfiles\n\n # Set the the forward fastq files\n sample.general.assemblyfastq = fastqfiles\n forward = fastqfiles[0]\n gz = True if '.gz' in forward else False\n # If there are two fastq files\n if len(fastqfiles) == 2:\n # Set the reverse fastq name https://github.com/ncbi/SKESA/issues/7\n sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \\\n '--use_paired_ends --vector_percent 1 ' \\\n '--contigs_out {contigs}'\\\n .format(fastqfiles=','.join(fastqfiles),\n threads=self.cpus,\n contigs=sample.general.assemblyfile)\n # Same as above, but use single read settings for the assembler\n else:\n sample.commands.assemble = 'skesa --fastq {fastqfiles} --cores {threads} ' \\\n '--vector_percent 1 --contigs_out {contigs}'\\\n .format(fastqfiles=','.join(fastqfiles),\n threads=self.cpus,\n contigs=sample.general.assemblyfile)\n # Specify that the files are gzipped\n if gz:\n sample.commands.assemble += ' --gz'\n # If there are no fastq files, populate the metadata appropriately\n else:\n sample.general.assembly_output = 'NA'\n sample.general.assemblyfastq = 'NA'\n sample.general.bestassemblyfile = 'NA'\n except AttributeError:\n sample.general.assembly_output = 'NA'\n sample.general.assemblyfastq = 'NA'\n sample.general.trimmedcorrectedfastqfiles = 'NA'\n sample.general.bestassemblyfile = 'NA'\n if sample.commands.assemble and not os.path.isfile(sample.general.assemblyfile):\n # Run the assembly\n out, err = run_subprocess(sample.commands.assemble)\n write_to_logfile(sample.commands.assemble,\n sample.commands.assemble,\n self.logfile,\n sample.general.logout,\n sample.general.logerr,\n None,\n None)\n write_to_logfile(out,\n err,\n self.logfile,\n sample.general.logout,\n sample.general.logerr,\n None,\n None)",
"def run(fieldsep, progfile, verbose, command, filename):\n try:\n do_config(fieldsep, verbose)\n\n content = get_content(filename)\n for cmd in get_cmds(progfile, command):\n process_content(cmd, content)\n\n except Exception, e:\n logging.error(unicode(e))",
"def create_sparse(f, vol=DMG_VOLUME_NAME, fs=DMG_DEFAULT_FS, mountpoint=DMG_MOUNT, dry_run=ARGS.dry_run):\n result = None\n sparseimage = Path('{f}.sparseimage'.format(f=f)) if not str(f).endswith('.sparseimage') else Path(f)\n\n if not isinstance(mountpoint, Path):\n mountpoint = Path(mountpoint)\n\n if not dry_run:\n if fs not in VALID_DMG_FS:\n raise TypeError\n\n # If the sparseimage exists and is already mounted\n if sparseimage.exists() and mountpoint.exists():\n LOG.warning('Unmounting existing mount point for {mount}'.format(mount=mountpoint))\n eject(silent=True)\n result = mount(sparseimage, mountpoint)\n else:\n cmd = ['/usr/bin/hdiutil', 'create', '-ov', '-plist', '-volname', vol, '-fs', fs, '-attach', '-type', 'SPARSE', str(f)]\n _p = subprocess.run(cmd, capture_output=True)\n LOG.debug('{cmd} ({returncode})'.format(cmd=' '.join([str(x) for x in cmd]), returncode=_p.returncode))\n\n if _p.returncode == 0:\n LOG.warning('Created temporary sparseimage for {img}'.format(img=f))\n _stdout = plist.read_string(_p.stdout)\n _entities = _stdout.get('system-entities')\n # _image_path = _stdout.get('image-components')[0] # This may not always be the sparseimage filename?\n\n if _entities:\n result = mount_device(_entities)\n LOG.warning('Mounted sparse image to {mountpoint}'.format(mountpoint=result))\n else:\n LOG.info(_p.stderr.decode('utf-8').strip())\n sys.exit(88)\n else:\n LOG.warning('Create {sparseimage} ({volume}i, {fs}) and mount to {mountpoint}'.format(sparseimage=f, volume=vol, fs=fs, mountpoint=mountpoint))\n\n if result and sparseimage and sparseimage not in result:\n result = (sparseimage, result[0], result[1])\n\n return result",
"def pre_process(filename, check=True):\n ntokens = 0\n if check:\n lines = []\n for line in codecs.open(filename, \"r\", \"utf-8\"):\n # lines += line.replace(\"\\n\",\"\").split() + [\"<eos>\"]\n lines += line.split() + [\"<eos>\"]\n ntokens = len(lines)\n\n lines = []\n for line in codecs.open(filename, \"r\", \"utf-8\"):\n lines.append(line.replace(\"@.@\", \"&numb\") + \"<eos>\")\n\n split_lines = [nltk.sent_tokenize(line.strip())\n for line in lines]\n\n new = []\n for line in split_lines:\n if len(line) > 1:\n new_lines = line[:-2]\n new_line = line[-2] + \" \" + line[-1]\n new_lines += [new_line]\n else:\n new_lines = line\n new += new_lines\n\n split_lines = [line.replace(\"&numb\", \"@.@\")\n for line in new]\n\n if check:\n len_new = [len(l.split()) for l in split_lines]\n assert ntokens == sum(len_new)\n print(\"# of tokens in the original file = {0}\\n\"\n \"# of tokens after pre-processing = {1}\\n\".format(\n ntokens, sum(len_new)))\n\n output = \"{0}.sents\".format(filename)\n print(\"Saving pre-processed file to {0}\\n\".format(output))\n with codecs.open(output, \"w\", \"utf-8\") as f:\n for line in split_lines:\n f.write(\"{0}\\n\".format(line.replace(\"\\n\", \"\")))",
"def process(input_path, output_path):\n save_plist(fix_data(load_yaml(input_path)), output_path)",
"def cli(yamlfile, **args):\n print(ShExGenerator(yamlfile, **args).serialize(**args))",
"def execute(self):\n cwd = self.fm.thisdir\n marked_files = cwd.get_selection()\n\n if not marked_files:\n return\n\n def refresh(_):\n cwd = self.fm.get_directory(original_path)\n cwd.load_content()\n\n original_path = cwd.path\n parts = self.line.split()\n au_flags = parts[1:]\n\n descr = \"compressing files in: \" + os.path.basename(parts[1])\n obj = CommandLoader(args=['apack'] + au_flags + \\\n [os.path.relpath(f.path, cwd.path) for f in marked_files], descr=descr)\n\n obj.signal_bind('after', refresh)\n self.fm.loader.add(obj)",
"def _process(self, f):\n n = parse(f)\n l = self._layout = Layout(n)\n l.layout(solve=False)\n return n"
] | [
"0.54346603",
"0.51681167",
"0.51676154",
"0.47499394",
"0.46290654",
"0.45078585",
"0.44578275",
"0.44389233",
"0.44310507",
"0.44083863",
"0.44033685",
"0.43932283",
"0.43796688",
"0.43533888",
"0.43257746",
"0.43140608",
"0.42917594",
"0.427881",
"0.426788",
"0.4243454",
"0.42221695",
"0.42055702",
"0.4197392",
"0.41860813",
"0.41784853",
"0.41611776",
"0.41611204",
"0.41578373",
"0.414715",
"0.41337478"
] | 0.5380593 | 1 |
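sparsify_disk above composes a low-level disk action with the event wait from the previous record. A minimal stand-alone sketch of that trigger-then-wait composition is shown below; do_action and wait_for_completion are hypothetical stand-ins for do_disk_action and wait_for_sparsify_event.

def run_disk_action(do_action, wait_for_completion, disk_id, target_domain, wait=True):
    # Fire the action first; bail out if the request itself was rejected.
    if not do_action(disk_id=disk_id, target_domain=target_domain, wait=wait):
        return False
    # Only block on the completion event when a synchronous call was requested.
    return wait_for_completion(disk_id) if wait else True


if __name__ == "__main__":
    ok = run_disk_action(
        do_action=lambda **kwargs: True,
        wait_for_completion=lambda disk_id: True,
        disk_id="disk_1",
        target_domain="sd_1",
    )
    print(ok)  # True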
Refresh the routing table | def refresh(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _refresh_table(self):\n self._column_selected()\n self._table_selected()\n self._column_selection_change()\n self.refresh_column_list()\n self.refresh_table_list()\n self.refresh_table()",
"def refresh(self) -> None:\n pass",
"def refresh(self) -> None:\n pass",
"def refresh(self) -> None:\n pass",
"def post_route_table_update(self, resource_id, resource_dict):\n pass",
"def Refresh(self):\n pass",
"def refresh(self):\n self.__refresh()",
"def reload(self):",
"def reload(self):",
"def RoutingInterfaceNotificationRefresh(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)",
"def pre_route_table_update(self, resource_id, resource_dict):\n pass",
"def pre_interface_route_table_update(self, resource_id, resource_dict):\n pass",
"def refresh_view():\n pass",
"def post_interface_route_table_update(self, resource_id, resource_dict):\n pass",
"def reload(self):\n\n pass",
"def update_host_routes(self, config, cache):\n db = cache.get_or_create('host_routes', lambda: {})\n for net in config.networks:\n\n # For each subnet...\n for subnet in net.subnets:\n cidr = str(subnet.cidr)\n\n # determine the set of previously written routes for this cidr\n if cidr not in db:\n db[cidr] = set()\n\n current = db[cidr]\n\n # build a set of new routes for this cidr\n latest = set()\n for r in subnet.host_routes:\n latest.add((r.destination, r.next_hop))\n\n # If the set of previously written routes contains routes that\n # aren't defined in the new config, run commands to delete them\n for x in current - latest:\n if self._alter_route(net.interface.ifname, 'del', *x):\n current.remove(x)\n\n # If the new config contains routes that aren't defined in the\n # set of previously written routes, run commands to add them\n for x in latest - current:\n if self._alter_route(net.interface.ifname, 'add', *x):\n current.add(x)\n\n if not current:\n del db[cidr]\n\n cache.set('host_routes', db)",
"def refresh(self):\n self._refresh_method()",
"def refreshTable(self):\n ds = []\n for id in self.protocol.getRefreshIDs():\n node = Node(id)\n nearest = self.protocol.router.findNeighbors(node, self.alpha)\n spider = NodeSpiderCrawl(self.protocol, node, nearest)\n ds.append(spider.find())\n\n def republishKeys(_):\n ds = []\n # Republish keys older than one hour\n for key, value in self.storage.iteritemsOlderThan(3600):\n ds.append(self.set(key, value))\n return defer.gatherResults(ds)\n\n d = defer.gatherResults(ds)\n d.addCallback(republishKeys)\n d.addErrback(self.onError)\n return d",
"def refresh(self):\n raise NotImplementedError",
"def refresh(self):\n raise NotImplementedError",
"def refresh(self):\n self.Refresh()",
"def __init__(self):\n self.routingTable = dict()",
"def refresh(self):\n raise NotImplementedError(\"To be implemented\")",
"def refresh():\n global tree\n tree = build_tree()\n tree.order_by_create()\n return index()",
"def _refreshActionTriggeredSlot(self):\r\n \r\n self._controller.model.refresh(self._controller.model.activeIndex)",
"def refreshTree(self):\n\n # Gets default rows\n rows = getsAllClients()\n\n # Puts and displays rows in tree\n self.displayTreeRows(rows)",
"def post_route_table_read(self, resource_id, resource_dict):\n pass",
"def refreshTradeRoutes(self, systemID):\n try:\n # get empire update\n self.getTradeRoutes()\n \n # show trade routes if not already shown\n if self.game.myEmpire['viewTradeRoutes'] == 0:\n self.toggleTradeRoutes()\n else:\n self.createTradeRouteSims()\n \n # refresh system panel if still hovering over it\n if self.systemInfo.currentID == systemID:\n self.systemInfo.panel.populate(self.systemInfo.panel.myEmpireDict, self.systemInfo.panel.mySystemDict)\n except:\n self.modeMsgBox('refreshTradeRoutes error ')",
"def reload(self):\n self._populate(self.hierarchy[-1])",
"def set_routing(self, rinfo):\n\n self.routing = [ self.Routing(*r) for r in rinfo ]"
] | [
"0.62884986",
"0.62584835",
"0.62584835",
"0.62584835",
"0.62575024",
"0.62315863",
"0.62027854",
"0.6158251",
"0.6158251",
"0.61570215",
"0.61530703",
"0.6113206",
"0.6086956",
"0.6081516",
"0.60719234",
"0.6031992",
"0.5962185",
"0.5955436",
"0.59503174",
"0.59503174",
"0.59382284",
"0.59358096",
"0.59106684",
"0.59058493",
"0.58972013",
"0.5879856",
"0.584784",
"0.581997",
"0.57974017",
"0.579638"
] | 0.638114 | 1 |
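The refresh stub above leaves the behaviour open; in Kademlia-style tables a refresh usually re-runs a lookup for a random ID in every bucket that has been idle too long, as the refreshTable negative suggests. The dict-based bucket layout and the lookup callback below are assumptions made only so the sketch runs on its own.

import random
import time

REFRESH_INTERVAL = 3600  # seconds a bucket may stay idle before it is refreshed


def refresh_routing_table(buckets, lookup, now=None):
    # Look up a random ID inside every stale bucket's range to pull in fresh contacts.
    now = time.time() if now is None else now
    for bucket in buckets:
        if now - bucket["refreshed"] >= REFRESH_INTERVAL:
            lookup(random.randint(bucket["low"], bucket["high"]))
            bucket["refreshed"] = now


if __name__ == "__main__":
    table = [{"low": 0, "high": 2 ** 160 - 1, "refreshed": 0}]
    refresh_routing_table(table, lookup=lambda target: print("looking up", hex(target)))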
Attempt to add the given node to the routing table. | def addNode(self, node: dht.node.Node):
bucket = self._findBucket(node)
        if bucket is None:
raise Exception("Found no bucket for given id")
        if node not in bucket:
# We do not have this node on our routing table yet;
# attempt to add it.
if len(bucket) < MAX_NODES_PER_BUCKET:
bucket.append(node)
else:
if bucket.inRange(myID):
# Our own node's ID is in the appropriate bucket's range,
# split the bucket and recursively attempt to add the node.
self._splitBucket(bucket)
self.addNode(node)
else:
# TODO: handle this
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_node(self, node):",
"def _add_node(self, node: int) -> None:\r\n self.nodes.add(node)",
"def add_node (self, node):\n self.network.add_node(node.id)\n self.network.node[node.id] = node",
"def add(self, nodeLoc):\n self.table[self.getHashIndex(nodeLoc)] = True",
"def add_node(self, node):\n self.nodes[node.name] = node\n self.dirty = True",
"def add_node (self, node):\n raise NotImplementedError",
"def add_node(self, node):\n self._nodes[node.id] = node\n self._clear_cache()",
"def add_node(self, node):\n if node not in self.nodes:\n self.nodes.append(node)",
"def insert_node(self, node):\n if self._is_node_reserved(node):\n return False\n\n # Put node in map\n self._node_map[node.get_id()] = node\n return True",
"def add_node(self, node):\n \n if node in self.node_set:\n return \n \n self.num_node = self.num_node + 1\n self.node_set.add(node)\n self.prefix[node] = {}\n self.suffix[node] = {}",
"def add_node(self, node):\n self.nodes.append(node)",
"def add_node(self, node):\n self.nodes.add(node)",
"def add_node(self, node):\n self.nodes.append(node)\n self.edges[node.identifier] = {}\n self._id2node[node.identifier] = node\n node.parent = None",
"def add(self, node):\n if str(node.getPosition()) in self._history:\n # duplicate entry\n return\n self._history[str(node.getPosition())] = True\n self._insort(node)",
"def add_node(self, node, parent):\n if node not in self.map.edges:\n self.map.edges[node] = []\n if parent not in self.map.edges:\n self.map.edges[parent] = [node]\n else:\n self.map.edges[parent].append(node)",
"def add_node(self, node: Node) -> None:\n\t\t# Check for conflicts with current nodes; iterate over nodes\n\t\tfor index in range(len(self.nodes)):\n\t\t\t# Exit if comparison fails. Node can update itself from the compare() method\n\t\t\tif not self.nodes[index].compare(node):\n\t\t\t\treturn\n\n\t\t# Add the Node if no conflicts\n\t\tself.nodes.append(node)",
"def add_node(self, node):\n if node not in self.nodes:\n self._nodes.append(node)",
"def add_node(self, node):\n self.nodeset.add(node) # add the input node to the nodeset\n\n self.__check_validity() # check if graph is valid - throws exception if not",
"def add_node(graph, node):\n if node not in graph:\n graph[node] = [0] # 0 = number of arcs coming into this node.",
"def add_node(self, node):\n self._nodes.add(node)",
"def add_node(self, node):\n self._nodes.add(node)",
"def add_node(self, node):\n index = self._node_index.setdefault(node.ntype, dict())\n if node.ext_id not in index:\n index.setdefault(node.ext_id, node)\n self._type_list.setdefault(node.ntype, list()).append(node)",
"def add_node(self, node: Node) -> None:\n assert len(\n self.network) <= 10, \"Too many nodes attempted to be placed in network\"\n self.network.append(node)",
"def add_node(self, node: Node):\n prop_str = \",\\n\".join([\"n.%s = '%s'\" % (k, v) for k, v in node.data.items()])\n query = \"\"\"\n MERGE (n:%s {id: '%s'})\n SET %s\n \"\"\" % (\n node.labels,\n norm_id(node.db_ns, node.db_id),\n prop_str,\n )\n return self.create_tx(query)",
"def add_node(self, node):\n try:\n self.dict.setdefault(node, OrderedDict())\n except (AttributeError, TypeError):\n raise \"Node Value must be hashable value\"",
"def add_node(self, node: Node):\n if node not in self.__graph_dict:\n self.__graph_dict[node] = []",
"def register_node(self, node):\n self.nodes.add(node)",
"def add_node(self, node):\n if node in self.edges:\n raise ValueError('Duplicate node')\n else:\n self.edges[node]=[]\n self.nodes.add(node)",
"def addNode (self, node):\n self.__nodes.add(node)",
"def addNode(self, node):\n if node in self.edges:\n raise ValueError('Duplicate node')\n else:\n self.edges[node] = []"
] | [
"0.7356876",
"0.71309835",
"0.7125489",
"0.7123913",
"0.7108166",
"0.7067707",
"0.70652515",
"0.7004943",
"0.70035964",
"0.6994578",
"0.6977097",
"0.69619924",
"0.69286156",
"0.69099814",
"0.68835133",
"0.688126",
"0.6873235",
"0.6852162",
"0.68456715",
"0.6845571",
"0.6845571",
"0.68408644",
"0.68100643",
"0.6800952",
"0.6783739",
"0.6778065",
"0.6767509",
"0.6764173",
"0.67607325",
"0.673013"
] | 0.7837554 | 0 |
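The addNode record above implements the usual Kademlia insert-or-split rule. The self-contained sketch below shows the same rule with plain dict buckets and integer IDs; K, the bucket layout and the drop-when-unsplittable branch are assumptions for illustration, not the document's own classes.

K = 8  # maximum contacts per bucket


def add_node(buckets, my_id, node_id):
    bucket = next(b for b in buckets if b["low"] <= node_id <= b["high"])
    if node_id in bucket["nodes"]:
        return  # already known
    if len(bucket["nodes"]) < K:
        bucket["nodes"].append(node_id)
    elif bucket["low"] <= my_id <= bucket["high"]:
        # Our own ID is in range: split the bucket and retry the insert.
        mid = (bucket["low"] + bucket["high"]) // 2
        low = {"low": bucket["low"], "high": mid, "nodes": []}
        high = {"low": mid + 1, "high": bucket["high"], "nodes": []}
        for n in bucket["nodes"]:
            (low if n <= mid else high)["nodes"].append(n)
        buckets.remove(bucket)
        buckets.extend([low, high])
        add_node(buckets, my_id, node_id)
    # else: full bucket outside our own range; the classic variant drops the node.


if __name__ == "__main__":
    table = [{"low": 0, "high": 255, "nodes": []}]
    for nid in range(12):
        add_node(table, my_id=3, node_id=nid)
    print([(b["low"], b["high"], b["nodes"]) for b in table])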
Find the appropriate bucket for the given node | def _findBucket(self, node):
        for bucket in self.buckets:
if bucket.inRange(node):
return bucket
#if bucket.low <= node and node <= bucket.high:
# return bucket
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __getitem__(self, node):\n j = self._hash_function(node)\n bucket = self._T[j]\n if bucket is None:\n raise KeyError(node)\n return bucket[node]",
"def findBucket(conn, bucketName):\n for cand in conn.get_all_buckets():\n if cand.name == bucketName:\n return cand\n return None",
"def findBucket(conn, bucketName):\n for cand in conn.get_all_buckets():\n if cand.name == bucketName:\n return cand\n return None",
"def bucket_indexof(table, key):",
"def _get_node(self, key):\n\n index = self._hash_function(key) % self.capacity # Get the index by hashing the key\n node = self._buckets[index].contains(key) # Get the node with the key (if it exists)\n return node",
"def get_bucket(aMap, key):\n\t#uses the hash_key to give us a bucket where the key COULD be in\n\t#since it's possible that we'd get the same hash_key for two diff values\n\tbucket_id = hash_key(aMap, key)\n\treturn aMap[bucket_id]",
"def findNode(self, target: hash.hash.Hash):\n for bucket in self.buckets:\n if bucket.inRange(nodeID):\n for node in bucket:\n if node.hash == target:\n return node\n \n return None\n return None",
"def find_bucket(parent_dir, val):\n candidates = glob(parent_dir + '/*/')\n bucket = None\n min_distance = 99\n for c in sorted([os.path.basename(c.strip(os.path.sep))\n for c in candidates], key=lambda x: x.strip('[]').lower()):\n distance = dircmp(val.lower(), c.lower())\n if 0 <= distance <= min_distance:\n bucket = c\n min_distance = distance\n\n if not bucket:\n logger.warning(\"Found no folder defining a compatible \"\n \"range for '%s'\" % val)\n return bucket",
"def get_bucket(self, bucket):\n msg = \"get_bucket not implemented\"\n raise NotImplementedError(msg)",
"def get_bucket(aMap,key):\n\tbucket_id=hash_key(aMap,key)\n\treturn aMap[bucket_id]",
"def _splitBucket(self, bucket):\n idx = self.buckets.index(bucket)\n self.buckets.pop(idx)\n middle = int(bucket.low + (bucket.high - bucket.low)/2)\n \n bucketLow = Bucket(bucket.low, middle, bucket.refreshed)\n bucketHigh = Bucket(middle+1, bucket.high, refreshed.refreshed)\n \n self.buckets.append(bucketLow)\n self.buckets.append(bucketHigh)\n \n for bucket in bucket.nodes:\n if bucketLow.inRange(bucket):\n bucketLow.addNode(bucket)\n else:\n bucketHigh.addNode(bucket)\n \n return (bucketLow, bucketHigh)",
"def get(self, key):\n hash_key = self._hash_function(key) % self.capacity # returns hashed keys corresponding bucket index\n bucket = self._buckets[hash_key] # get bucket for that index\n\n current = bucket.head # set bucket.head to variable as not to override linked list\n\n while current is not None: # iterate through linked list until value is found, or returns None\n if current.key == key:\n return current.value\n current = current.next",
"def find(self, value):\n bucketNum = self.__hash(value)\n originalBucketNum = bucketNum\n if self.__buckets[bucketNum] is not None and self.__buckets[bucketNum] == value:\n return self.__buckets[bucketNum]\n else:\n bucketNum = self.__rehash(bucketNum)\n while self.__buckets[bucketNum] is not None and self.__buckets[bucketNum] != value and \\\n bucketNum != originalBucketNum:\n bucketNum = self.__rehash(bucketNum)\n if self.__buckets[bucketNum] is not None and self.__buckets[bucketNum] == value:\n return self.__buckets[bucketNum]\n else:\n return None",
"def get_bucket():\n return FileBucket(os.path.join(context.site.data_path, 'buckets'))",
"def bucket_for_value(self, value):\n\n # bisect.bisect_left is wrong because the buckets are of [lower, upper) form\n return bisect.bisect(self._lower_bounds, value) - 1",
"def get(self, key: int) -> int:\n hashKey = key % 1000\n if self.bucket[hashKey]:\n node = self.bucket[hashKey]\n while node:\n if node.pair[0] == key:\n return node.pair[1]\n node = node.next\n return -1",
"def getBucketFromHostname(hostname):\n\n\t# Create RE pattern from Config.host_bucket\n\tpattern = Config.Config().host_bucket % { 'bucket' : '(?P<bucket>.*)' }\n\tm = re.match(pattern, hostname)\n\tif not m:\n\t\treturn (hostname, False)\n\treturn m.groups()[0], True",
"def __getitem__(self, item):\n bucket = self._buckets[self._index(item)]\n for node in bucket.linked_list:\n bucket_object_key, bucket_object_value = node.value\n assert isinstance(bucket_object_key, BucketObject)\n assert isinstance(bucket_object_value, BucketObject)\n if bucket_object_key.load_value() == item:\n key_list_node, value_list_node = (\n self._object_to_list_node[bucket_object_key],\n self._object_to_list_node[bucket_object_value],\n )\n # update in-memory and disk linked list\n self._in_memory_objects.remove_and_append(key_list_node)\n self._in_memory_objects.remove_and_append(value_list_node)\n self._disk_objects.remove_and_append(key_list_node)\n self._disk_objects.remove_and_append(value_list_node)\n # balance memory usage\n self._balance()\n return bucket_object_value.load_value()\n raise KeyError(\"Key `{}` is not exists\".format(item))",
"def get_bucket(self, bucket_key):\n return self.buckets.get(bucket_key)",
"def get(self, element):\n bucket_index = self._bucket_index(element)\n return self.buckets[bucket_index].find(lambda value: value == element)",
"def bucket(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bucket\")",
"def bucket(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"bucket\")",
"def get_bucket(bucket):\n if isinstance(bucket, Bucket):\n return bucket\n if isinstance(bucket, str):\n return setup_bucket(bucket)\n else:\n raise TypeError(\"Expected bucket to be Bucket or str was %s \" % type(bucket))",
"def bucket_boundaries(self, bucket):\n\n if bucket < 0 or bucket >= self.total_buckets:\n raise IndexError('bucket %d out of range' % bucket)\n if bucket == self.total_buckets - 1:\n return (self._lower_bounds[bucket], float('Inf'))\n return (self._lower_bounds[bucket], self._lower_bounds[bucket + 1])",
"def _bucket_index(self, element):\n return hash(element) % len(self.buckets)",
"def _bucket_index(self, key):\n # Calculate the given key's hash code and transform into bucket index\n return hash(key) % len(self.buckets)",
"def bucket(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bucket\")",
"def bucket(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bucket\")",
"def bucket(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bucket\")",
"def bucket(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"bucket\")"
] | [
"0.69149685",
"0.6786792",
"0.6786792",
"0.65305567",
"0.64101285",
"0.64010876",
"0.6354371",
"0.62193334",
"0.6108624",
"0.60978067",
"0.59703565",
"0.59660417",
"0.59175164",
"0.58907247",
"0.57858413",
"0.57837915",
"0.5781839",
"0.57607853",
"0.57242984",
"0.5691928",
"0.56607735",
"0.56607735",
"0.56036055",
"0.56035817",
"0.5550285",
"0.55356723",
"0.5513252",
"0.5513252",
"0.5513252",
"0.5513252"
] | 0.86419946 | 0 |
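_findBucket above scans every bucket in turn; when the buckets are kept sorted by their lower bound, the same lookup can use bisect. The tuple bucket shape below is an assumption made only to keep the example self-contained.

import bisect


def find_bucket(buckets, node_id):
    # buckets must be sorted by the low bound and non-overlapping.
    lows = [low for low, _high, _nodes in buckets]
    i = bisect.bisect_right(lows, node_id) - 1
    if i >= 0 and buckets[i][0] <= node_id <= buckets[i][1]:
        return buckets[i]
    return None


if __name__ == "__main__":
    table = [(0, 127, []), (128, 191, []), (192, 255, [])]
    print(find_bucket(table, 150))  # (128, 191, [])
    print(find_bucket(table, 300))  # None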
Find the K nodes in the routing table closest to the given target ID. | def findClosestNodes(self, target: hash.hash.Hash):
# TODO: make more efficient
# See: http://stackoverflow.com/questions/30654398/implementing-find-node-on-torrent-kademlia-routing-table
nodes = []
for bucket in self.buckets:
nodes = nodes + bucket.nodes
        nodes.sort(key=lambda x: x.distanceToHash(target))
return nodes[:config.K] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def nearest_neighbor(data_set, target):\n \n tree = KDT(data_set)\n k = tree.k\n p = KDTNode(target)\n \n def KDsearch(current, target, neighbor, distance):\n \"\"\"The actual nearest neighbor search algorithm.\n Inputs:\n current (KDTNode): the node to examine.\n target (KDTNode): the target (stored in a KDTNode).\n neighbor (KDTNode): the current nearest neighbor.\n distance (float): the current minimum distance.\n \"\"\"\n \n # Base case. Return the distance and the nearest neighbor.\n if current is None:\n return neighbor, distance\n index = current.axis\n d = target - current\n if d < distance:\n distance = d\n neighbor = current\n if target < current: # Recursively search 'left'\n neighbor, distance = KDsearch(\n current.left, target, neighbor, distance)\n # Back up if needed\n if target.data[index] + distance >= current.data[index]: # (?)\n neighbor, distance = KDsearch(\n current.right, target, neighbor, distance)\n else: # Recursively search 'right'\n neighbor, distance = KDsearch(\n current.right, target, neighbor, distance)\n # Back up if needed\n if target.data[index] - distance <= current.data[index]: # (?)\n neighbor, distance = KDsearch(\n current.left, target, neighbor, distance)\n \n return neighbor, distance\n \n # Search the KD-tree.\n result = KDsearch(tree.root, p, tree.root, tree.root - p)\n return result[0].data, result[1]",
"def knearest( self, restaurant_id, set_of_restaurants, k = 7, reg = 3.0 ):\t\t\n\t\tsimilar = []\t\t\n\t\tfor other_rest_id in set_of_restaurants:\n\t\t\tif other_rest_id != restaurant_id:\n\t\t\t\tsim, n_common = self.get( other_rest_id, restaurant_id )\n\t\t\t\tsim = self.shrunk_sim( sim = sim, n_common = n_common, reg = reg )\n\t\t\t\tsimilar.append( ( other_rest_id, sim, n_common ) )\n\n\t\tsimilars = sorted( similar, key = itemgetter(1), reverse = True )\t\n\t\treturn similars[0:k]",
"def findNode(self, target: hash.hash.Hash):\n for bucket in self.buckets:\n if bucket.inRange(nodeID):\n for node in bucket:\n if node.hash == target:\n return node\n \n return None\n return None",
"def k_nearest_neighbor(self, k, target, current_root, k_nearest_heap = LargeHeap()):\n iter_list = [] # a stack to store iteration path\n # step1: find the 'nearest' leaf\n nearest_leaf = current_root\n while nearest_leaf is not None:\n iter_list.append(nearest_leaf) # store the path\n tt = nearest_leaf.point\n tt1 = nearest_leaf.axis\n if target[nearest_leaf.axis] < nearest_leaf.point[nearest_leaf.axis]:\n if nearest_leaf.left is not None: # then go to the left child\n nearest_leaf = nearest_leaf.left\n else:\n break\n else:\n if nearest_leaf.right is not None: # else, go to the right child\n nearest_leaf = nearest_leaf.right\n else:\n break\n while nearest_leaf.left is not None or nearest_leaf.right is not None:\n if nearest_leaf.left is not None:\n nearest_leaf = nearest_leaf.left\n iter_list.append(nearest_leaf)\n if nearest_leaf.right is not None:\n nearest_leaf = nearest_leaf.right\n iter_list.append(nearest_leaf)\n tt = nearest_leaf.point\n \"\"\"\n step2: find the k nearest by backtracking upside\n Two situations to add the point into the heap k_nearest_heap\n A. when len(k_nearest_heap) < k\n B. when dis(point, target) < current_max_dis\n \"\"\"\n # k_nearest_heap = LargeHeap() # the large heap to store the current 'nearest' neighbors\n # the max distance is actually the distance between target and the top of the heap\n '''\n current_max_dis = self.distance(target, nearest_leaf.point[:self.n_dim])\n k_nearest_heap.add(nearest_leaf, current_max_dis)\n tmp = iter_list.pop()\n '''\n former_node = nearest_leaf # the former 'current_node', to indicate whether go through this child\n while iter_list != []:\n if k_nearest_heap.len > 0:\n current_max_dis = k_nearest_heap.heaplist[0][1]\n else:\n current_max_dis = -1\n current_pointer = iter_list.pop()\n tt = current_pointer.point\n dis = self.distance(current_pointer.point[:self.n_dim], target)\n if k_nearest_heap.len < k:\n k_nearest_heap.add(current_pointer, dis)\n elif dis < current_max_dis:\n k_nearest_heap.pop()\n k_nearest_heap.add(current_pointer, dis)\n # current_max_dis = self.distance(k_nearest_heap.heaplist[0][0].point[:self.n_dim], target)\n current_max_dis = k_nearest_heap.heaplist[0][1]\n axis = current_pointer.axis\n if abs(target[axis] - current_pointer.point[axis]) >= current_max_dis:\n former_node = current_pointer\n # if not intersect with\n continue\n if current_pointer.left is not None and current_pointer.left != former_node:\n tt = current_pointer.left\n # iter_list.append(current_pointer.left)\n self.k_nearest_neighbor(k, target, current_pointer.left, k_nearest_heap)\n if current_pointer.right is not None and current_pointer.right != former_node:\n tt = current_pointer.right\n # iter_list.append(current_pointer.righat)\n self.k_nearest_neighbor(k, target, current_pointer.right, k_nearest_heap)\n former_node = current_pointer\n rlist = []\n rdis = []\n for ele in k_nearest_heap.heaplist:\n rlist.append(ele[0].point)\n rdis.append(ele[1])\n return rdis, rlist",
"def get_closest_node(self, point, n=1):\n n = min(n,len(self.nodes))#prevent index error\n if n > 1:\n tmp = zip(*self.nkdtree.query(point,n))\n return [(d, self.nkdtree_keys[i]) for d,i in tmp]\n else:\n dist, id = self.nkdtree.query(point,n)\n return [(dist, self.nkdtree_keys[id])]",
"def nearest_neighbors(self):\n neighbor_distances_and_indices = []\n for idx, data_point in enumerate(self.data):\n distance = self.euclidean_dis(data_point[:-1], self.query) # Calculate the distance between the query\n # example and the current example from the data.\n\n neighbor_distances_and_indices.append((distance, idx)) # Add the distance and the index of the example\n # to an ordered collection\n\n sorted_neighbor_distances_and_indices = sorted(neighbor_distances_and_indices, key=lambda x: x[0]) #\n # Sort the ordered collection of distances and indices from smallest to largest (in ascending order) by\n # the distances\n\n k_nearest_distances_and_indices = sorted_neighbor_distances_and_indices[:self.k] # Pick the first K\n # entries from the sorted collection\n\n k_nearest_labels = [self.data[i][1] for distance, i in k_nearest_distances_and_indices] # Get the labels of\n # the selected K entries\n\n return k_nearest_labels, self.mode(k_nearest_labels)",
"def choose_k_away(k, node, precomputed=True):\n if precomputed:\n sps = np.load('./short_paths/' + str(node) + '.npy')\n else:\n sps = shortest_path(node)\n nodes = []\n for index, x in enumerate(sps):\n if x == k and index in poss_targets:\n nodes.append(index)\n return nodes",
"def k_nearest_neighbor(self, k, target, current_root, k_nearest_heap): # 1 step\r\n iter_list = [] # a stack to store iteration path # 1 step\r\n # step1: find the 'nearest' leaf\r\n nearest_leaf = current_root # 1 step\r\n while nearest_leaf is not None: # 2 steps: while, is not\r\n iter_list.append(nearest_leaf) # store the path # 1 step\r\n tt = nearest_leaf.point # 2 steps: nearest_leaf.point, tt = \r\n if target[nearest_leaf.axis] < nearest_leaf.point[nearest_leaf.axis]: # 6 steps: if, <, nearest_leaf.axis, nearest_leaf.point, nearest_leaf.point[],target[]\r\n if nearest_leaf.left is not None: # then go to the left child # 3 steps: if, is not, nearest_leaf.left\r\n nearest_leaf = nearest_leaf.left # 2 steps: nearest_leaf.left, nearest_leaf = \r\n else:\r\n break\r\n else:\r\n if nearest_leaf.right is not None: # else, go to the right child\r\n nearest_leaf = nearest_leaf.right\r\n else:\r\n break\r\n while nearest_leaf.left is not None or nearest_leaf.right is not None: # 6 steps: while, is not, or, is not, nearest_leaf.left, nearest_leaf.right\r\n if nearest_leaf.left is not None: # 3 steps: if, is not, nearest_leaf.left\r\n nearest_leaf = nearest_leaf.left # 2 steps: nearest_leaf.left, = \r\n iter_list.append(nearest_leaf) # 1 step\r\n if nearest_leaf.right is not None: # 3 steps: if, is not, nearest_leaf.right\r\n nearest_leaf = nearest_leaf.right # 2 steps: nearest_leaf.right, = \r\n iter_list.append(nearest_leaf) # 1 step\r\n tt = nearest_leaf.point # 2 steps: nearest_leaf.point, tt = \r\n \"\"\"\r\n step2: find the k nearest by backtracking upside\r\n Two situations to add the point into the heap k_nearest_heap\r\n A. when len(k_nearest_heap) < k\r\n B. when dis(point, target) < current_max_dis\r\n \"\"\"\r\n # k_nearest_heap = LargeHeap() # the large heap to store the current 'nearest' neighbors\r\n # the max distance is actually the distance between target and the top of the heap\r\n '''\r\n current_max_dis = self.distance(target, nearest_leaf.point[:self.n_dim])\r\n k_nearest_heap.add(nearest_leaf, current_max_dis)\r\n tmp = iter_list.pop()\r\n '''\r\n former_node = nearest_leaf # the former 'current_node', to indicate whether go through this child\r\n # 1 step\r\n while iter_list != []: # 2 steps: while, !=\r\n if k_nearest_heap.len > 0: # 3 steps: if, k_nearest_heap.len, >\r\n current_max_dis = k_nearest_heap.heaplist[0][1] # 4 steps: k_nearest_heap.heaplist, k_nearest_heap.heaplist[0], k_nearest_heap.heaplist[0][1], current_max_dis =\r\n else:\r\n current_max_dis = -1\r\n current_pointer = iter_list.pop() # 1+38 steps: 1 step - current_pointer = ; 38 steps - iter_list.pop()\r\n tt = current_pointer.point # 2 steps: current_pointer.point, tt=\r\n dis = self.distance(current_pointer.point[:self.n_dim], target) \r\n # 1+11 steps: 1 step - dis=, 11 steps - self.distance()\r\n if k_nearest_heap.len < k:\r\n k_nearest_heap.add(current_pointer, dis)\r\n elif dis < current_max_dis: # 2 steps: elif, <\r\n k_nearest_heap.pop() # 38 steps: k_nearest_heap.pop()\r\n k_nearest_heap.add(current_pointer, dis) # 30 steps: k_nearest_heap.add()\r\n # current_max_dis = self.distance(k_nearest_heap.heaplist[0][0].point[:self.n_dim], target)\r\n current_max_dis = k_nearest_heap.heaplist[0][1] # 4 steps: k_nearest_heap.heaplist, k_nearest_heap.heaplist[],k_nearest_heap.heaplist[][], current_max_dis =\r\n axis = current_pointer.axis # 2 steps: current_pointer.axis, axis = \r\n if abs(target[axis] - current_pointer.point[axis]) >= current_max_dis:\r\n # 6 steps: if, >=, target[axis], - , 
current_pointer.point[], abs()\r\n former_node = current_pointer # 1 step\r\n # if not intersect with\r\n continue # 1 step\r\n if current_pointer.left is not None and current_pointer.left != former_node:\r\n # 5 steps: if, is not, and, current_pointer.left, !=\r\n tt = current_pointer.left # 2 steps: current_pointer.left, tt =\r\n # iter_list.append(current_pointer.left)\r\n self.k_nearest_neighbor(k, target, current_pointer.left, k_nearest_heap)\r\n # T(n/2) steps: self.k_nearest_neighbor()\r\n if current_pointer.right is not None and current_pointer.right != former_node:\r\n # 5 steps: if, is not, and, current_pointer.left, !=\r\n tt = current_pointer.right # 2 steps: current_pointer.left, tt =\r\n # iter_list.append(current_pointer.righat)\r\n self.k_nearest_neighbor(k, target, current_pointer.right, k_nearest_heap)\r\n # T(n/2) steps: self.k_nearest_neighbor()\r\n former_node = current_pointer # 1 step\r\n rlist = [] # 1 step\r\n rdis = [] # 1 step\r\n for ele in k_nearest_heap.heaplist: # 2 steps: for, in \r\n rlist.append(ele[0].point) # 3 steps: append(), ele[0], ele[0].point\r\n rdis.append(ele[1]) # 2 steps: append(), ele[1]\r\n return rdis, rlist # 1 step\r",
"def k_nearest(self, pt, k):\n if k < 1:\n raise ValueError('k should be at least 1')\n result = []\n visit_ct = k_nearest(self.root, pt, k, result)\n logging.debug('Visited {0} leaf nodes'.format(visit_ct))\n return [(math.sqrt(d), item) for (d, item) in result]",
"def ClosestPrecedingFinger(self, id):\r\n for i in range(M_BITS, 0, -1):\r\n if self.IsInRange(self.fingerTable[i].Node.HashValue, self.nodeInfo.HashValue, False, id, False):\r\n return self.fingerTable[i].Node\r\n return self.nodeInfo",
"def test_k_nearest(self):\n L = range(100)\n L = [(i, i, i, i) for i in L]\n tree = KdTree(L)\n # remove distance, only keep points from the result\n items = lambda items: [x for (d, x) in items] \n assert items(tree.k_nearest((-1, -1), 1)) == [(0, 0, 0, 0)]\n assert items(tree.k_nearest((100, 100), 1)) == [(99, 99, 99, 99)]\n assert items(tree.k_nearest((50, 50), 1)) == [(50, 50, 50, 50)]\n assert items(tree.k_nearest((-1, -1), 2)) == [(0, 0, 0, 0),\n (1, 1, 1, 1)]",
"def get_k_shortest_paths(env: RailEnv,\n source_position: Tuple[int, int],\n source_direction: int,\n target_position=Tuple[int, int],\n k: int = 1, debug=False) -> List[Tuple[Waypoint]]:\n\n # P: set of shortest paths from s to t\n # P =empty,\n shortest_paths: List[Tuple[Waypoint]] = []\n\n # countu: number of shortest paths found to node u\n # countu = 0, for all u in V\n count = {(r, c, d): 0 for r in range(env.height) for c in range(env.width) for d in range(4)}\n\n # B is a heap data structure containing paths\n # N.B. use OrderedSet to make result deterministic!\n heap: OrderedSet[Tuple[Waypoint]] = OrderedSet()\n\n # insert path Ps = {s} into B with cost 0\n heap.add((Waypoint(source_position, source_direction),))\n\n # while B is not empty and countt < K:\n while len(heap) > 0 and len(shortest_paths) < k:\n if debug:\n print(\"iteration heap={}, shortest_paths={}\".format(heap, shortest_paths))\n # – let Pu be the shortest cost path in B with cost C\n cost = np.inf\n pu = None\n for path in heap:\n if len(path) < cost:\n pu = path\n cost = len(path)\n u: Waypoint = pu[-1]\n if debug:\n print(\" looking at pu={}\".format(pu))\n\n # – B = B − {Pu }\n heap.remove(pu)\n # – countu = countu + 1\n\n urcd = (*u.position, u.direction)\n count[urcd] += 1\n\n # – if u = t then P = P U {Pu}\n if u.position == target_position:\n if debug:\n print(\" found of length {} {}\".format(len(pu), pu))\n shortest_paths.append(pu)\n\n # – if countu ≤ K then\n # CAVEAT: do not allow for loopy paths\n elif count[urcd] <= k:\n possible_transitions = env.rail.get_transitions(*urcd)\n if debug:\n print(\" looking at neighbors of u={}, transitions are {}\".format(u, possible_transitions))\n # for each vertex v adjacent to u:\n for new_direction in range(4):\n if debug:\n print(\" looking at new_direction={}\".format(new_direction))\n if possible_transitions[new_direction]:\n new_position = get_new_position(u.position, new_direction)\n if debug:\n print(\" looking at neighbor v={}\".format((*new_position, new_direction)))\n\n v = Waypoint(position=new_position, direction=new_direction)\n # CAVEAT: do not allow for loopy paths\n if v in pu:\n continue\n\n # – let Pv be a new path with cost C + w(u, v) formed by concatenating edge (u, v) to path Pu\n pv = pu + (v,)\n # – insert Pv into B\n heap.add(pv)\n\n # return P\n return shortest_paths",
"def k_shortest_paths(\n self,\n G,\n source,\n target,\n k=1,\n weight='weight',\n ):\n\n if source == target:\n return ([0], [[source]])\n\n (length, path) = nx.single_source_dijkstra(G, source, target,\n weight=weight)\n if target not in length:\n raise nx.NetworkXNoPath('node %s not reachable from %s' % (source,\n target))\n\n lengths = [length[target]]\n paths = [path[target]]\n c = count()\n B = []\n\n # Is deep copy really required?\n # Fails due to embedded Ctype objects which can not be pickled\n # # G_original = G.copy()\n # Swapping with shallow copy...will it work?\n\n G_original = G\n if nx.is_directed(G_original):\n G = nx.DiGraph(G_original)\n else:\n G = nx.Graph(G_original)\n\n ######################################\n #TODO: wrap this up somehow\n print ''\n print term.move_up + term.move_up\n ######################################\n print 'getting K:{} paths...'.format(k),\n for i in range(1, k):\n with term.location():\n print i\n for j in range(len(paths[-1]) - 1):\n spur_node = paths[-1][j]\n root_path = (paths[-1])[:j + 1]\n\n edges_removed = []\n for c_path in paths:\n if len(c_path) > j and root_path == c_path[:j + 1]:\n u = c_path[j]\n v = c_path[j + 1]\n if G.has_edge(u, v):\n edge_attr = G.edge[u][v]\n G.remove_edge(u, v)\n edges_removed.append((u, v, edge_attr))\n\n for n in range(len(root_path) - 1):\n node = root_path[n]\n\n # out-edges\n\n for (u, v, edge_attr) in G.edges_iter(node, data=True):\n\n # print 'lala1: {} -> {}'.format(u,v)\n\n G.remove_edge(u, v)\n edges_removed.append((u, v, edge_attr))\n\n if G.is_directed():\n\n # in-edges\n\n for (u, v, edge_attr) in G.in_edges_iter(node,\n data=True):\n\n # print 'lala2: {} -> {}'.format(u,v)\n\n G.remove_edge(u, v)\n edges_removed.append((u, v, edge_attr))\n\n (spur_path_length, spur_path) = nx.single_source_dijkstra(G,\n spur_node, target, weight=weight)\n if target in spur_path and spur_path[target]:\n total_path = root_path[:-1] + spur_path[target]\n total_path_length = self.get_path_length(G_original,\n root_path, weight) + spur_path_length[target]\n heappush(B, (total_path_length, next(c), total_path))\n\n for e in edges_removed:\n (u, v, edge_attr) = e\n G.add_edge(u, v, edge_attr)\n\n if B:\n (l, _, p) = heappop(B)\n lengths.append(l)\n paths.append(p)\n else:\n break\n\n return (lengths, paths)",
"def knn(p, k, x, t):\r\n\r\n # Number of instances in data set\r\n N = x.shape[0]\r\n\r\n Euclidean_Distance = numpy.square(x - p) #Euclidean distance\r\n dis = numpy.sum(Euclidean_Distance, axis=1) #sum of the euclidean distance\r\n inds = numpy.argsort(dis)[:k] #sort the indices of the distance array\r\n tgt_cat = Counter([t[i] for i in inds]) #count the times of equivalent target labels\r\n top_class = max(tgt_cat, key= tgt_cat.get) #top class among the k nearest points\r\n\r\n\r\n #top_class = 0\r\n\r\n return top_class",
"def findknn(xTr,xTe,k):\n\n # YOUR CODE HERE\n if k > len(xTr):\n k = len(xTr)\n \n D=l2distance(xTe, xTr)\n (m,n) = D.shape\n \n indices = []\n dists = []\n for i in range(m):\n smallest_indices = np.argsort(D[i])\n ind = smallest_indices[:k]\n dis = D[i,smallest_indices[:k]]\n indices.append(ind)\n dists.append(dis)\n \n indices = np.transpose(np.array(indices))\n dists = np.transpose(np.array(dists))\n return indices, dists",
"def get_closest_relationship(self, point, n=1):\n n = min(n,len(self.rtype_vectors))#prevent index error\n if n > 1:\n tmp = zip(*self.rkdtree.query(point,n))\n return [(d, self.rkdtree_keys[i]) for d,i in tmp]\n else:\n dist, id = self.rkdtree.query(point,n)\n return [(dist, self.rkdtree_keys[id])]",
"def shortestPath(self, source, target):\n dist = {}\n prev = {}\n q = []\n for y,a in enumerate(self.sm):\n for x,b in enumerate(self.sm[y]):\n dist[(x,y)] = sys.maxint\n prev[(x,y)] = None\n q.append((x,y))\n dist[source] = 0\n\n while len(q) is not 0:\n # find the node with minimum value (u)\n d = deepcopy(dist)\n while True:\n b = dict(map(lambda item: (item[1],item[0]), d.items()))\n u = b[min(b.keys())]\n if u not in q:\n d.pop(u)\n else:\n break\n\n if dist[u] == sys.maxint: # remaining nodes are inaccessible\n break\n\n q.remove(u)\n\n\n if u == target: # target found\n break\n\n for v in self.getNeighbors(u):\n alt = dist[u] + 1\n if alt < dist[v]:\n dist[v] = alt\n prev[v] = u\n\n s = []\n u = target\n while prev[u] is not None:\n s.append(u)\n u = prev[u]\n s.reverse()\n\n return s",
"def nearest(self, value):\n coords = value[:2] # value only has 2 coords (x, y) right now, but it may have theta in the future\n hits = self.idx.nearest(self.make_bounding_box(coords), 1, objects=False)\n for hit in hits:\n # take the first index in the event of any ties\n return self.nodes[hit]\n \n \n \n #assert that value is valid here\n \"\"\"def recur(node, depth=0):\n closest, distance = node, self.cost(node.value, value)\n if depth < self.max_size:\n for child in node.children:\n (child_closest, child_distance) = recur(child, depth+1)\n if child_distance < distance:\n closest = child_closest\n distance = child_distance \n return closest, distance\n return recur(self.root)[0]\"\"\"",
"def visit_k_nearest(node, pt, k, result):\n # rather brute force but because cut off and k expected to be rather small\n # not further optimized\n # (result could instead of list be a bin heap with at most k items)\n for active, item in zip(node.active, node.items):\n # check active items\n if active:\n d = distance2(pt, item)\n result.append( (d, item) )\n # sort on distance\n result.sort(key=lambda x: x[0])\n # keep max k items\n while len(result) > k:\n result.pop()",
"def KDsearch(current, target, neighbor, distance):\n \n # Base case. Return the distance and the nearest neighbor.\n if current is None:\n return neighbor, distance\n index = current.axis\n d = target - current\n if d < distance:\n distance = d\n neighbor = current\n if target < current: # Recursively search 'left'\n neighbor, distance = KDsearch(\n current.left, target, neighbor, distance)\n # Back up if needed\n if target.data[index] + distance >= current.data[index]: # (?)\n neighbor, distance = KDsearch(\n current.right, target, neighbor, distance)\n else: # Recursively search 'right'\n neighbor, distance = KDsearch(\n current.right, target, neighbor, distance)\n # Back up if needed\n if target.data[index] - distance <= current.data[index]: # (?)\n neighbor, distance = KDsearch(\n current.left, target, neighbor, distance)\n \n return neighbor, distance",
"def get_nearest(src_points, candidates, k_neighbors=1):\n\n # Create tree from the candidate points\n tree = BallTree(candidates, leaf_size=15, metric='haversine')\n distances, indices = tree.query(src_points, k=k_neighbors)\n\n # Transpose to get distances and indices into arrays\n distances = distances.transpose()\n indices = indices.transpose()\n\n # Get closest indices and distances (i.e. array at index 0)\n # note: for the second closest points, you would take index 1, etc.\n closest = indices[0]\n closest_dist = distances[0]\n\n # Return indices and distances\n return closest, closest_dist",
"def search(self, search_term: Point, k: int) -> [Point]:\n\n # Find k nearest neighbours\n self.neighbours, self.closed_list, self.b_list, self.t_list = [], [], [], []\n self.forward_traverse(None, self.root, search_term, 0, k)\n\n # Flatten neighbour list to points-only and sort by distance.\n if self.neighbours:\n self.neighbours = sorted(self.neighbours, key=lambda k: k['dist'])\n self.neighbours = [n['point'] for n in self.neighbours]\n\n\n\n return self.neighbours",
"def find_nearest_neighbors_idx(X, x, k):\n ## homework:start\n result = \n ## homework:end\n return result",
"def get_neighbors(training_set, \r\n labels, \r\n test_instance, \r\n k, \r\n distance=distance):\r\n distances = []\r\n for index in range(len(training_set)):\r\n dist = distance(test_instance, training_set[index])\r\n distances.append((training_set[index], dist, labels[index]))\r\n distances.sort(key=lambda x: x[1])\r\n neighbors = distances[:k]\r\n return neighbors",
"def search_highest_k_neighbor(self, k):\n max_score = 0\n target_node = None\n from_idx = None\n to_idx = None\n for i in range(k):\n node = self.graph.nodes[random.randrange(len(self.graph.nodes))]\n cluster_idx = self.search_cluster_by_node(node)\n if len(self.result[cluster_idx].get_nodes()) == 1:\n end_i = len(self.result)\n else:\n end_i = len(self.result) + 1\n\n random_cluster_idx = random.randrange(end_i)\n if random_cluster_idx != cluster_idx:\n tried_score = self.try_replace_node(node, cluster_idx, random_cluster_idx)\n if max_score < tried_score:\n max_score = tried_score\n target_node = node\n from_idx = cluster_idx\n to_idx = random_cluster_idx\n\n return max_score, target_node, from_idx, to_idx",
"def FindPredeccesor(self, id):\r\n node = self.nodeInfo\r\n while True:\r\n succNode = self.RemoteGetSuccessor(node.Address)\r\n if self.IsInRange(id, node.HashValue, False,succNode.HashValue, True) == False:\r\n node = self.RemoteClosestPrecedingFinger(node.Address, id)\r\n else:\r\n break\r\n return node",
"def find_nearest(ref_array,target_array):\n ref_tree = scipy.spatial.cKDTree(ref_array)\n dist, indices = ref_tree.query(target_array, k=1)\n return indices",
"def k_shortest_paths(G, source, target, k=1, weight='weight'):\n\tif source == target:\n\t\treturn ([0], [[source]]) \n\t \n\tlength, path = nx.single_source_dijkstra(G, source, target, weight=weight)\n\tif target not in length:\n\t\traise nx.NetworkXNoPath(\"node %s not reachable from %s\" % (source, target))\n\t\t\n\tlengths = [length[target]]\n\tpaths = [path[target]]\n\tc = count()\t\t\n\tB = []\t\t\t\t\t\t\n\tG_original = G.copy()\t\n\t\n\tfor i in range(1, k):\n\t\tfor j in range(len(paths[-1]) - 1):\t\t\t\n\t\t\tspur_node = paths[-1][j]\n\t\t\troot_path = paths[-1][:j + 1]\n\t\t\t\n\t\t\tedges_removed = []\n\t\t\tfor c_path in paths:\n\t\t\t\tif len(c_path) > j and root_path == c_path[:j + 1]:\n\t\t\t\t\tu = c_path[j]\n\t\t\t\t\tv = c_path[j + 1]\n\t\t\t\t\tif G.has_edge(u, v):\n\t\t\t\t\t\tedge_attr = G.edge[u][v]\n\t\t\t\t\t\tG.remove_edge(u, v)\n\t\t\t\t\t\tedges_removed.append((u, v, edge_attr))\n\t\t\t\n\t\t\tfor n in range(len(root_path) - 1):\n\t\t\t\tnode = root_path[n]\n\t\t\t\t# out-edges\n\t\t\t\tfor u, v, edge_attr in G.copy().edges_iter(node, data=True):\n\t\t\t\t\tG.remove_edge(u, v)\n\t\t\t\t\tedges_removed.append((u, v, edge_attr))\n\t\t\t\t\n\t\t\t\tif G.is_directed():\n\t\t\t\t\t# in-edges\n\t\t\t\t\tfor u, v, edge_attr in G.in_edges_iter(node, data=True):\n\t\t\t\t\t\tG.remove_edge(u, v)\n\t\t\t\t\t\tedges_removed.append((u, v, edge_attr))\n\t\t\t\n\t\t\tspur_path_length, spur_path = nx.single_source_dijkstra(G, spur_node, target, weight=weight)\t\t\t\n\t\t\tif target in spur_path and spur_path[target]:\n\t\t\t\ttotal_path = root_path[:-1] + spur_path[target]\n\t\t\t\ttotal_path_length = get_path_length(G_original, root_path, weight) + spur_path_length[target]\t\t\t\t\n\t\t\t\theappush(B, (total_path_length, next(c), total_path))\n\t\t\t\t\n\t\t\tfor e in edges_removed:\n\t\t\t\tu, v, edge_attr = e\n\t\t\t\tG.add_edge(u, v, edge_attr)\n\t\t\t\t\t \n\t\tif B:\n\t\t\t(l, _, p) = heappop(B)\n\t\t\tlengths.append(l)\n\t\t\tpaths.append(p)\n\t\telse:\n\t\t\tbreak\n\t\n\treturn (lengths, paths)",
"def k_shortest_paths(G, source, target, k=1, weight='weight'):\n\tif source == target:\n\t\treturn ([0], [[source]]) \n\t \n\tlength, path = nx.single_source_dijkstra(G, source, target, weight=weight)\n\tif target not in length:\n\t\traise nx.NetworkXNoPath(\"node %s not reachable from %s\" % (source, target))\n\t\t\n\tlengths = [length[target]]\n\tpaths = [path[target]]\n\tc = count()\t\t\n\tB = []\t\t\t\t\t\t\n\tG_original = G.copy()\t\n\t\n\tfor i in range(1, k):\n\t\tfor j in range(len(paths[-1]) - 1):\t\t\t\n\t\t\tspur_node = paths[-1][j]\n\t\t\troot_path = paths[-1][:j + 1]\n\t\t\t\n\t\t\tedges_removed = []\n\t\t\tfor c_path in paths:\n\t\t\t\tif len(c_path) > j and root_path == c_path[:j + 1]:\n\t\t\t\t\tu = c_path[j]\n\t\t\t\t\tv = c_path[j + 1]\n\t\t\t\t\tif G.has_edge(u, v):\n\t\t\t\t\t\tedge_attr = G.edge[u][v]\n\t\t\t\t\t\tG.remove_edge(u, v)\n\t\t\t\t\t\tedges_removed.append((u, v, edge_attr))\n\t\t\t\n\t\t\tfor n in range(len(root_path) - 1):\n\t\t\t\tnode = root_path[n]\n\t\t\t\t# out-edges\n\t\t\t\tfor u, v, edge_attr in G.copy().edges_iter(node, data=True):\n\t\t\t\t\tG.remove_edge(u, v)\n\t\t\t\t\tedges_removed.append((u, v, edge_attr))\n\t\t\t\t\n\t\t\t\tif G.is_directed():\n\t\t\t\t\t# in-edges\n\t\t\t\t\tfor u, v, edge_attr in G.in_edges_iter(node, data=True):\n\t\t\t\t\t\tG.remove_edge(u, v)\n\t\t\t\t\t\tedges_removed.append((u, v, edge_attr))\n\t\t\t\n\t\t\tspur_path_length, spur_path = nx.single_source_dijkstra(G, spur_node, target, weight=weight)\t\t\t\n\t\t\tif target in spur_path and spur_path[target]:\n\t\t\t\ttotal_path = root_path[:-1] + spur_path[target]\n\t\t\t\ttotal_path_length = get_path_length(G_original, root_path, weight) + spur_path_length[target]\t\t\t\t\n\t\t\t\theappush(B, (total_path_length, next(c), total_path))\n\t\t\t\t\n\t\t\tfor e in edges_removed:\n\t\t\t\tu, v, edge_attr = e\n\t\t\t\tG.add_edge(u, v, edge_attr)\n\t\t\t\t\t \n\t\tif B:\n\t\t\t(l, _, p) = heappop(B)\t\t\n\t\t\tlengths.append(l)\n\t\t\tpaths.append(p)\n\t\telse:\n\t\t\tbreak\n\t\n\treturn (lengths, paths)",
"def find_paths(self, start_key, target_key):\n\n stack = [(start_key, [start_key])]\n while stack:\n node_key, path = stack.pop()\n node = self.nodes[node_key]\n for nxt in node.neighbors - set(path):\n if nxt == target_key:\n yield path + [nxt]\n else:\n stack.append((nxt, path + [nxt]))"
] | [
"0.65448064",
"0.63752544",
"0.6327887",
"0.6252531",
"0.62341696",
"0.6202399",
"0.6175748",
"0.6170855",
"0.61633563",
"0.6134241",
"0.6043866",
"0.60149163",
"0.6012577",
"0.601178",
"0.59670776",
"0.596042",
"0.5876144",
"0.5814865",
"0.5796329",
"0.5743522",
"0.57418907",
"0.57399327",
"0.57319903",
"0.57160634",
"0.57107997",
"0.5710152",
"0.57077295",
"0.5662452",
"0.5662452",
"0.5659156"
] | 0.7600108 | 0 |
Remove the given bucket from the routing table, split the bucket into two buckets each spanning half of the original bucket's ID space, redistribute the nodes to the appropriate buckets and add the buckets to the routing table. | def _splitBucket(self, bucket):
idx = self.buckets.index(bucket)
self.buckets.pop(idx)
middle = int(bucket.low + (bucket.high - bucket.low)/2)
bucketLow = Bucket(bucket.low, middle, bucket.refreshed)
        bucketHigh = Bucket(middle+1, bucket.high, bucket.refreshed)
self.buckets.append(bucketLow)
self.buckets.append(bucketHigh)
        for node in bucket.nodes:
            if bucketLow.inRange(node):
                bucketLow.addNode(node)
            else:
                bucketHigh.addNode(node)
return (bucketLow, bucketHigh) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_group_bucket():\n pass",
"def delete_bucket_replication(Bucket=None):\n pass",
"def delete_bucket(Bucket=None):\n pass",
"def remove(self, key: int) -> None:\n hashKey = key % 1000\n prev = node = self.bucket[hashKey]\n if not node: return\n if node.pair[0] == key:\n self.bucket[hashKey] = node.next\n else:\n node = node.next\n while node:\n if node.pair[0] == key:\n prev.next = node.next\n break\n else:\n prev, node = prev.next, node.next",
"def remove_bucket(args):\n \n res_get = sync.get()\n if res_get != 0:\n return 4\n \n args.suppress_verify_output = True\n if verify(args) != 0:\n # restore stdout\n sys.stdout = sys.__stdout__\n print(\"Config file not valid, please use the verify function to debug\")\n return 1\n \n if does_bucket_exist(args) != 0:\n return 2\n\n #Potential issue: we need to validate the bucket keys are correct and this is how we do it\n #However, issue with this occurs if the bucket no longer exists in Echo. This currently prevents it from being\n #removed as an entry in DynaFed. \n #Potential solution: if we are an admin, bypass this keys check. This would mean the user cannot remove a bucket\n #entry that doesn't exist. Not sure how to get around this while keeping the key validation in place\n #TL;DR not a massive issue but still annoying\n\n if hasattr(args, 'admin_operation') and hasattr(args, 'groups'):\n admin_operation = args.admin_operation and \"dynafed/admins\" in args.groups\n \n if not admin_operation:\n\n # Validate bucket exists in Echo\n if update_bucket_cors(args) != 0:\n\n # Bucket could not be matched in Echo. Bucket may no longer exist\n if do_keys_match_bucket(args) != 0:\n return 3\n elif update_bucket_cors(args) != 0:\n if do_keys_match_bucket(args) != 0:\n return 3\n\n remove_bucket_from_config_file(args)\n remove_bucket_from_json(args)\n\n res_put = sync.put()\n if res_put != 0:\n return 4\n\n res_get = sync.get()\n if res_get != 0:\n return 4\n\n return 0",
"def dec(self, key: str) -> None:\n if key not in self.bucket_of_keys:\n return\n bucket, prev_bucket = self.bucket_of_keys[key], self.bucket_of_keys[key].prev\n self.bucket_of_keys.pop(key, None)\n if bucket.value > 1:\n if bucket is self.buckets.begin() or prev_bucket.value < bucket.value - 1:\n prev_bucket = self.buckets.insert(bucket, Node(bucket.value - 1, set()))\n prev_bucket.keys.add(key)\n self.bucket_of_keys[key] = prev_bucket\n bucket.keys.remove(key)\n if not bucket.keys:\n self.buckets.erase(bucket)",
"def test_delete_buckets(self):\n pass",
"def tablename_to_bucket(prefix, tablename):\n if tablename.startswith(prefix):\n return tablename.replace(prefix, '', 1)\n return None",
"def remove(self, key):\n index = key % self.size\n prev = cur = self.bucket[index]\n\n if not cur: return\n if cur.key == key:\n self.bucket[index] = cur.next\n return\n\n cur = cur.next\n while cur:\n if cur.key == key:\n prev.next = cur.next\n break\n else:\n cur = cur.next\n prev = prev.next",
"def remove(self, key):\n\n node = self._get_node(key) # Check to see if the key is in the table\n if node is None: # Key is not in the table (do nothing)\n return\n\n index = self._get_index(key) # Get the index for the LinkedList\n node = self._buckets[index].head # Start at the head of the LinkedList\n\n if node.key == key: # Handle the case where key is at the head\n self._buckets[index].head = node.next\n\n else:\n previous = node\n current = node.next\n while current.key != key: # Find the link with the right key\n previous = current\n current = current.next\n previous.next = current.next # Cut the link out of the list\n\n self.size -= 1",
"def remove_bucket_from_json(args):\n\n sanitised_group = args.group.replace('/', '-')\n\n with open(args.file, \"r\") as f:\n config_json = json.load(f)\n\n for group in config_json[\"groups\"]:\n if group[\"name\"] == sanitised_group:\n for bucket in group[\"buckets\"]:\n if bucket[\"name\"] == args.bucket:\n config_json[\"groups\"].remove(group)\n group[\"buckets\"].remove(bucket)\n if group[\"buckets\"]: # if no more buckets in group, delete that group\n config_json[\"groups\"].append(group)\n with open(args.file, \"w\") as f:\n json.dump(config_json, f, indent=4)\n return 0\n break\n return 1",
"def delete_bucket(self, name):\n return",
"def delete_bucketlist():\n pass",
"def get_bucket(aMap, key):\n\t#uses the hash_key to give us a bucket where the key COULD be in\n\t#since it's possible that we'd get the same hash_key for two diff values\n\tbucket_id = hash_key(aMap, key)\n\treturn aMap[bucket_id]",
"def delete_bucket_website(Bucket=None):\n pass",
"def manipulate_bucketlist():\n pass",
"def delete_bucket_tagging(Bucket=None):\n pass",
"def remove_cell(self, cell: Cell):\r\n assert isinstance(cell, Cell)\r\n cell.bucket().remove(cell)\r\n if self[self.max_gain] == cell.bucket() and len(cell.bucket()) == 0:\r\n self.decrement_max_gain()\r\n cell.bucket_num = None",
"def updatebucket(bdict, tstamp, keys):\n bdict[1].append(tstamp)\n for key in bdict.keys():\n if len(bdict[key]) > 2:\n bdict[key].pop(0)\n oldfirststamp = bdict[key].pop(0)\n # klist[-1] is the last bucket\n if key != keys[-1]:\n # merged by next bucket\n bdict[key * 2].append(oldfirststamp)\n else:\n break",
"def test_delete_bucket(self):\n pass",
"def delete_whole_bucket(bucket):\n bucket = s3r.Bucket(bucket)\n for key in bucket.objects.all():\n key.delete()\n bucket.delete()\n print(bucket, \" : is deletd \")",
"def split_kbucket(self):\n cur_range_size = self.range_max - self.range_min\n half_point = self.range_min + cur_range_size // 2\n\n # Ensure no empty range is created.\n assert self.range_min < half_point < self.range_max\n\n # Make the instantiation dependent on the actual class,\n # for easy inheritance.\n new_kbucket = self.__class__(half_point, self.range_max)\n\n # Halve the ID space of the split KBucket.\n self.range_max = half_point\n\n # Split the contact list into two, according to the new ranges.\n self._contacts, new_kbucket._contacts = util.partition(\n self._contacts,\n self.contact_in_range\n )\n\n return new_kbucket",
"def split_kbucket(self):\n new_kbucket = super(CachingKBucket, self).split_kbucket()\n\n cache_self, cache_new = util.partition(\n self._replacement_cache,\n self.contact_in_range\n )\n\n # Replacement caches are deques, so we can't directly assign\n # the values returned by partition.\n new_kbucket._replacement_cache.extend(cache_new)\n self._replacement_cache.clear()\n self._replacement_cache.extend(cache_self)\n\n self.fill_from_cache()\n new_kbucket.fill_from_cache()\n\n return new_kbucket",
"def delete_bucket_cors(Bucket=None):\n pass",
"def delete_bucket_acl(self, bucket, user):\n msg = \"delete_bucket_acl not implemented\"\n raise NotImplementedError(msg)",
"def delete_bucket_policy(Bucket=None):\n pass",
"def _pair_based_graph_cut(self, graph):\n for node in self._find_paired_nodes(graph):\n graph.remove_node(node)\n return",
"def _bucket_delitem(self, j, k):\n bucket = self._table[j]\n if bucket is None: # no match found\n raise KeyError(\"Key Error: \" + repr(k))\n del bucket[k]",
"def get_bucket(aMap,key):\n\tbucket_id=hash_key(aMap,key)\n\treturn aMap[bucket_id]",
"def remove_bucket(self, arg_bucket_name):\n # In success case returns [True, name_of_deleted_bucket],\n # else - print Error info and return [False, error_info_data].\n try:\n self.s3_client.delete_bucket(Bucket=arg_bucket_name)\n return [True, arg_bucket_name]\n except ClientError as err:\n return [False, self.parse_exception('remove_bucket', err)]"
] | [
"0.65343845",
"0.62422407",
"0.5784059",
"0.55969435",
"0.5557828",
"0.5495421",
"0.5392276",
"0.5360948",
"0.53419083",
"0.5333039",
"0.52992237",
"0.529006",
"0.528266",
"0.52823454",
"0.5254394",
"0.5252794",
"0.5247381",
"0.5245212",
"0.5236542",
"0.5232374",
"0.5227516",
"0.51816875",
"0.5178423",
"0.5163135",
"0.5116398",
"0.50995547",
"0.5095427",
"0.50808775",
"0.50540715",
"0.5043092"
] | 0.70319253 | 0 |
Creates a call status class based on the monitoring backend | def create_call_status(job, internal_storage):
monitoring_backend = job.config['lithops']['monitoring']
Status = getattr(lithops.worker.status, '{}CallStatus'
.format(monitoring_backend.capitalize()))
return Status(job, internal_storage) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def createStatus(self, *args, **kwargs):\n\n return await self._makeApiCall(self.funcinfo[\"createStatus\"], *args, **kwargs)",
"def status_api(config: dict, **kwargs):\n cfg = Config.from_dict(config)\n return status(cfg=cfg, **kwargs)",
"def __init__(self: \"Status\") -> None:\n raise NotImplementedError(\n \"Please instantiate one of the `Status` \"\n \"subclasses:\\n\"\n \"\\n\\t- `Failed`\"\n \"\\n\\t- `NotStarted`\"\n \"\\n\\t- `InProgress(progress)`\"\n \"\\n\\t- `Succeeded`\"\n )",
"def createStatusObject(self):\n if self.config_filepath is None:\n return False\n\n self.status = GARunStatus(self.config_filepath)\n return True",
"def __init__(self, *args, **kwds):\n DiagnosticStatus.__init__(self, *args, **kwds)",
"def getStatus():",
"def _create_status(self):\n if self.headers['Accept'] != CONTENT_TYPE_STATUS:\n raise NotAcceptable()\n\n body = self.server.status()\n self._write_response(\n 200, body,\n content_type='application/se.novafaen.smrt.status.v1+json'\n )\n self.server.successful_response()",
"def _get_status_obj(self):\n\n status = Status(self._config.dirout, name=self._config.name,\n hardware=self._config.hardware)\n return status",
"def get_status(request):\n if \"liveness\" in request.query_params:\n return Response({\"alive\": True})\n\n app_status = ApplicationStatus()\n celery_param = request.query_params.get(\"celery\", \"false\").lower()\n if celery_param == \"true\":\n return Response(app_status.celery_task_status)\n\n response = {\n \"api_version\": app_status.api_version,\n \"celery_status\": app_status.celery_status,\n \"commit\": app_status.commit,\n \"current_datetime\": app_status.current_datetime,\n \"database_status\": app_status.database_status,\n \"debug\": app_status.debug,\n \"modules\": app_status.modules,\n \"platform_info\": app_status.platform_info,\n \"python_version\": app_status.python_version,\n }\n return Response(response)",
"def __get_status_api(self):\r\n try:\r\n return Call_shelly_api(url=self.__api_address + \"/status\")\r\n except ShellyException as err:\r\n _LOGGER.warning(err)",
"def status( self ):\n duration = datetime.datetime.now() - self.startTime\n status = {\n 'start': self.startTime.isoformat(),\n 'now': datetime.datetime.now().isoformat(),\n 'duration': duration.total_seconds(),\n 'bookmark': 0,\n 'events': 0,\n 'cumulative_rate': 0,\n 'processes': [],\n 'state': {\n 'id': self.state,\n 'description': definitions.STATE_STRING[self.state]\n }\n }\n\n # Sending pipes to processes which are not running or shutting down\n # will lead to errors and deadlocks. Loop through to detect errors.\n if self.state == definitions.STATE_RUNNING:\n # Loop through all processes and just check we're running properly\n for proxy in self.processes:\n if not proxy.process.is_alive():\n self.logger.info( 'Process {0} is dead.'.format( proxy.name ))\n self.state = definitions.STATE_ERROR\n break\n\n if proxy.request( 'status' )['state'] == definitions.STATE_ERROR:\n self.logger.info( 'Process {0} state is {1}.'.format(\n proxy.name,\n definitions.STATE_STRING[ definitions.STATE_ERROR ]\n ))\n\n self.state = definitions.STATE_ERROR\n break\n\n # Now do the actual status checks\n if self.state == definitions.STATE_RUNNING:\n # Loop through processes in order\n for proxy in self.processes:\n response = proxy.request('status')\n\n proc = {\n 'name': proxy.name,\n 'pid': proxy.process.pid,\n 'count': response['count'],\n 'sleep': response['sleep']\n }\n\n status['events'] = proc['count']\n status['processes'].append( proc )\n\n if 'bookmark' in response:\n status['bookmark'] = response['bookmark']\n\n status['cumulative_rate'] = round(\n status['events'] / duration.total_seconds(), 2)\n\n return status",
"def _init_status(self):\n def status(core, args):\n extra = None\n\n for key in ['day', 'week', 'month', 'year', 'span', 'task']:\n if getattr(args, key) is not None:\n extra = (key, ' '.join(getattr(args, key)))\n break\n\n return core.status(extra=extra)\n\n usage = (\n 'stl (status|show) '\n '[-d ... | -w ... | -m ... | -y ... | -s ... | -t ...]'\n )\n desc = (\n 'show a status report; '\n 'when called without further arguments, '\n 'it will tell you what you are doing now'\n )\n\n subp = self.subparsers.add_parser(\n 'status', aliases=['show'], usage=usage, description=desc,\n help=desc[:desc.find(';')])\n\n group = subp.add_mutually_exclusive_group()\n group.add_argument(\n '-d', '--day', nargs=argparse.REMAINDER,\n help=('report for the given day, '\n 'e.g. 15 oct, 2016-10-15, today, yesterday; '\n 'empty string defaults to today'))\n group.add_argument(\n '-w', '--week', nargs=argparse.REMAINDER,\n help=('report for the given week, '\n 'possible values are this and last; '\n 'empty string defaults to this week'))\n group.add_argument(\n '-m', '--month', nargs=argparse.REMAINDER,\n help=('report for the given month, '\n 'e.g. oct, 10, 10 2016, this, last; '\n 'empty string defaults to this month'))\n group.add_argument(\n '-y', '--year', nargs=argparse.REMAINDER,\n help=('report for the given year, '\n 'e.g. 2016, this, last; '\n 'empty string defaults to this year'))\n group.add_argument(\n '-s', '--span', nargs=argparse.REMAINDER,\n help=('report for the time span between two dates '\n '(inclusive), e.g. 15 25 oct, 15 sep 2016 25 oct 2016, '\n '15 sep 25 oct; if you specify only one date, '\n 'the second will be set to today; some restrictions: '\n 'the second date (if such) cannot be less specific '\n 'than the first and months cannot be numbers'))\n group.add_argument(\n '-t', '--task', nargs=argparse.REMAINDER,\n help='report for the given task')\n\n subp.set_defaults(func=status)",
"def status(*args, **kwargs): # real signature unknown\n pass",
"def __init__(self, *args, **kwds):\n if args or kwds:\n super(LIMITS_STATUS, self).__init__(*args, **kwds)\n #message fields cannot be None, assign default values for those that are\n if self.sysid is None:\n self.sysid = 0\n if self.compid is None:\n self.compid = 0\n if self.limits_state is None:\n self.limits_state = 0\n if self.last_trigger is None:\n self.last_trigger = 0\n if self.last_action is None:\n self.last_action = 0\n if self.last_recovery is None:\n self.last_recovery = 0\n if self.last_clear is None:\n self.last_clear = 0\n if self.breach_count is None:\n self.breach_count = 0\n if self.mods_enabled is None:\n self.mods_enabled = 0\n if self.mods_required is None:\n self.mods_required = 0\n if self.mods_triggered is None:\n self.mods_triggered = 0\n else:\n self.sysid = 0\n self.compid = 0\n self.limits_state = 0\n self.last_trigger = 0\n self.last_action = 0\n self.last_recovery = 0\n self.last_clear = 0\n self.breach_count = 0\n self.mods_enabled = 0\n self.mods_required = 0\n self.mods_triggered = 0",
"def __new__(cls, value: int):\n new_object = super(CalibrationStatus, cls).__new__(cls, value)\n new_object._value = value # type: ignore[attr-defined]\n new_object._binary = new_object._to_binary() # type: ignore[attr-defined]\n return new_object",
"def status(cls, stat, request=Retrieve):\n res = cls.STATUS_MAP.get(stat)\n if res is None:\n res = status.Status('%d.00' % (stat // 100))\n if res.success:\n res = request.success\n return res",
"def _read_status(self, cls=MySQLStatus):",
"def test_get_status(self) -> None:\n\n given = \"example.org\"\n\n # This is an abstract method. So we need to define it.\n self.checker.query_status = lambda: None\n\n self.checker.subject = given\n\n actual = self.checker.get_status()\n\n self.assertIsInstance(actual, CheckerStatusBase)",
"def _status(self, host):\n pass",
"def model_status():\n return juju.CLIENT.Client(request=\"FullStatus\")",
"def status():\n pass",
"def status(self):\n now = int(time())\n return {\n 'smrt': {\n 'smrt_version': '1.0.0',\n 'app_loaded': True,\n 'uptime': now - self._started\n },\n 'application': {\n 'name': 'Cogsworth',\n 'status': 'OK',\n 'version': '0.0.1'\n },\n 'server_time': now,\n 'status': {\n 'amount_successful': self._requests_successful,\n 'amount_warning': self._requests_warning,\n 'amount_error': self._requests_error,\n 'amount_bad': self._requests_bad,\n 'amount_total': (self._requests_successful\n + self._requests_warning\n + self._requests_error\n + self._requests_bad)\n }\n }",
"async def get_status():",
"def call_status():\n\n if 'mocean-call-uuid' in request.form:\n call_uuid = request.form.get('mocean-call-uuid')\n logging.info(f'### Call status received [{call_uuid}] ###')\n for k, v in request.form.items():\n logging.info(f'\\t{k}:{v}')\n\n if request.form.get('mocean-call-uuid') in calls \\\n and request.form.get('mocean-status') == 'HANGUP':\n logging.debug(f'Deleting call-uuid[{call_uuid}] from calls dict')\n del calls[call_uuid]\n call_ended.append(call_uuid)\n return Response('', status=204, mimetype='text/plain')\n else:\n return invalid_response()",
"def remote_status():",
"def _get_status(self):\n return self.__status",
"def status(self, status: dict):\n pass",
"def translate_from_rpc(rpcActuatorOutputStatus):\n return ActuatorOutputStatus(\n \n rpcActuatorOutputStatus.active,\n \n \n rpcActuatorOutputStatus.actuator\n )",
"def status(self):",
"def status_class(status):\n status = status.split('-')[-1] # e.g. \"overall-passed\" -> \"passed\"\n classes = {\n 'passed': 'success',\n 'failed': 'danger',\n 'skipped': 'warning',\n 'match': 'success',\n 'diff': 'danger',\n 'missing': 'warning',\n 'generated': 'warning',\n }\n return classes[status]"
] | [
"0.6328769",
"0.61205095",
"0.5998343",
"0.59102756",
"0.589642",
"0.5886395",
"0.5878291",
"0.5816793",
"0.5735519",
"0.57342607",
"0.5676877",
"0.56198055",
"0.5617202",
"0.56133866",
"0.55926067",
"0.5591889",
"0.5588031",
"0.5572215",
"0.55713326",
"0.55615175",
"0.55567026",
"0.55327696",
"0.55206645",
"0.551895",
"0.55187607",
"0.5507078",
"0.5491386",
"0.54659176",
"0.5465776",
"0.54609126"
] | 0.7931719 | 0 |
Sends the init event | def send_init_event(self):
self.status['type'] = '__init__'
self._send() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _initialize(self):\n self.send_init_command()",
"def onInit(self):\n pass",
"def on_start(self):\n self.init()",
"def onInit(*args):",
"def onInit(*args):",
"def onInit(*args):",
"def onInit(*args):",
"def do_init(self):\n\n pass",
"def init():\n pass",
"def on_initialize(self) -> None:\n pass",
"def init():",
"def initialize(self):\n logger.debug(\"Begin Generation\")\n self.events.begin_generation()",
"def on_connection_init() -> None:\r\n print(\"\\nInitialize G-Earth connection\\n\")",
"def initialize(self,init):\n logger.info('*** initialize: worker id=%d',self._agent.wid)\n self.commands = {'initialize':None, 'before_do_work':None, 'after_do_work':None, 'finalize':None}\n self.commands.update(init.get(self._agent.wid,{}))\n exec_command(self.commands['initialize'])",
"def on_load(self):\n self.__init__()",
"def on_init_start(self):\n for callback in self.callbacks:\n callback.on_init_start(self)",
"def on_init(self):\n self.engineer_list = []\n self.list_items = []\n self.selected_items = []\n self.log_info(self.name + \" initialized\")\n self.bind('PUB', alias='main')",
"def initialise(self):\n self.set_up()",
"def initialize(self, *args, **kwargs):\n self.initialized = True",
"def autonomousInit(self):\n #self.timer.reset()\n #self.timer.start()\n pass",
"def init(self):\n pass",
"def init(self):\n pass",
"def init(self):\n pass",
"def init(self):\n pass",
"def init(self):\n pass",
"def init(self):\n pass",
"def init(self):\n pass",
"def init(self):\n pass",
"def __init__(self):\n self.started = Event()",
"def Init(self, config):\r\n pass"
] | [
"0.7990824",
"0.752718",
"0.7522999",
"0.7409211",
"0.7409211",
"0.7409211",
"0.7409211",
"0.7294225",
"0.7091037",
"0.70493495",
"0.7023764",
"0.69746894",
"0.6971314",
"0.6968194",
"0.6942409",
"0.6934815",
"0.69088274",
"0.6777505",
"0.67478627",
"0.6718694",
"0.6697049",
"0.6697049",
"0.6697049",
"0.6697049",
"0.6697049",
"0.6697049",
"0.6697049",
"0.6697049",
"0.66809297",
"0.6673064"
] | 0.84639376 | 0 |
Sends the finish event | def send_finish_event(self):
self.status['type'] = '__end__'
self._send() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __finish(self):\n self.finished.emit()",
"def notify_finish_event(self):\n self.notify(self._finish_event_type())",
"def finish(self):\r\n self.start_finish()\r\n self.wait_finish()",
"def finished(self):\n\t\telog(\"finished\")",
"def finish():\n pass",
"def finish(self):\r\n\r\n self._is_finished = True",
"def start_finish(self):\r\n self.send_queue.put(('finish',))",
"def finish():",
"def finish():",
"def finish():",
"def finish():",
"def finish(self):\n pass",
"def finish(self):\n pass",
"def on_finish(self):\n pass",
"def finish(self):",
"def finish(self):",
"def finished(self):\n pass",
"def onDone(self):\n pass",
"def finished(self):",
"def onfinish():",
"def _onEnd(self, name, completed):\n logging.debug(\"onEnd...\")",
"def _end(self):\n\n self.logger.msg1(\"Done\")",
"def finish(self) -> None:",
"def finish(self) -> None:",
"def on_finish(self):",
"def on_finish(self):\n return self._on_finish",
"def end(self):\n self._log.debug('%s: doing ..', __class__.__name__)\n self._log.debug('%s: done.', __class__.__name__)",
"def _finished(self) -> None:",
"def end(self):\n self.my_print(\"\\t[DONE]\", msg_types.INFO)\n self.in_progress = False",
"def _do_done(self, event):\n self._done(event.result)"
] | [
"0.8138743",
"0.8032189",
"0.795691",
"0.79205817",
"0.79074085",
"0.78841597",
"0.78116596",
"0.7794982",
"0.7794982",
"0.7794982",
"0.7794982",
"0.77801937",
"0.77801937",
"0.7775521",
"0.7747547",
"0.7747547",
"0.76819074",
"0.76054335",
"0.7594948",
"0.7494719",
"0.74728334",
"0.7470768",
"0.74506056",
"0.74506056",
"0.74425167",
"0.74359256",
"0.73645985",
"0.73432827",
"0.72804064",
"0.72649944"
] | 0.85653186 | 0 |
5x5 conv filter: preserves fmap dimensions if stride=1; exactly halves fmap dimensions if stride=2; requires padding=2, dilation=1, kernel_size=5; becomes a depthwise convolution when in_planes = out_planes = groups | def conv5x5(in_planes, out_planes, stride=1, groups=1):
return nn.Conv2d(in_planes, out_planes, kernel_size=5, stride=stride, groups=groups,
padding=2, dilation=1, bias=False) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def conv5x5(in_planes, out_planes, stride=1, groups=1, dilation=1):\n\n return nn.Conv2d(in_planes, out_planes, kernel_size=5, stride=stride,\n padding=2, groups=groups, bias=False, dilation=dilation)",
"def conv5x5(in_planes, out_planes, stride=1):\n return nn.Conv2d(in_planes, out_planes, kernel_size=5, stride=stride,\n padding=2, bias=False)",
"def conv5x5(self, in_planes, out_planes, stride=1):\n c = self.conv(5, in_planes, out_planes, stride=stride)\n return c",
"def block(x, filters, kernel_size=3, up_stride=1, groups=32, conv_shortcut=True, name=None):\n bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1\n\n if conv_shortcut is True:\n if up_stride == 1:\n shortcut = layers.Conv2D((64 // groups) * filters, 1, use_bias=False, name=name + '_0_conv')(x)\n else:\n shortcut = layers.Conv2DTranspose((64 // groups) * filters, 1, strides=up_stride, use_bias=False,\n name=name + '_0_conv')(x)\n shortcut = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_0_bn')(shortcut)\n else:\n shortcut = x\n\n if up_stride == 1:\n x = layers.Conv2D(filters, 1, use_bias=False, name=name + '_1_conv')(x)\n else:\n x = layers.Conv2DTranspose(filters, 1, strides=up_stride, use_bias=False,\n padding='same', name=name + '_1_conv')(x)\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')(x)\n x = layers.Activation('relu', name=name + '_1_relu')(x)\n\n c = filters // groups\n x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)\n x = layers.DepthwiseConv2D(kernel_size, depth_multiplier=c, use_bias=False, name=name + '_2_conv')(x)\n x_shape = backend.int_shape(x)[1:-1]\n x = layers.Reshape(x_shape + (groups, c, c))(x)\n output_shape = x_shape + (groups, c) if backend.backend() == 'theano' else None\n x = layers.Lambda(lambda x: sum([x[:, :, :, :, i] for i in range(c)]), output_shape=output_shape,\n name=name + '_2_reduce')(x)\n x = layers.Reshape(x_shape + (filters,))(x)\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_2_bn')(x)\n x = layers.Activation('relu', name=name + '_2_relu')(x)\n\n x = layers.Conv2D((64 // groups) * filters, 1, use_bias=False, name=name + '_3_conv')(x)\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_3_bn')(x)\n\n x = layers.Add(name=name + '_add')([shortcut, x])\n x = layers.Activation('relu', name=name + '_out')(x)\n return x",
"def block1(x, filters, kernel_size=3, stride=1,\n conv_shortcut=True, dilation=1, name=None):\n bn_axis = 3 if backend.image_data_format() == 'channels_last' else 1\n\n if conv_shortcut is True:\n if stride == 1:\n shortcut = layers.Conv2D(4 * filters, 1, strides=stride, use_bias=False,\n name=name + '_0_conv')(x)\n shortcut = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\n name=name + '_0_bn')(shortcut)\n else:\n shortcut = layers.Conv2D(4 * filters, 3, strides=stride, use_bias=False,\n name=name + '_0_conv')(x)\n shortcut = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\n name=name + '_0_bn')(shortcut)\n else:\n shortcut = x\n\n x = layers.Conv2D(filters, 1, use_bias=False, name=name + '_1_conv')(x)\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\n name=name + '_1_bn')(x)\n x = layers.Activation('relu', name=name + '_1_relu')(x)\n\n padding = 'SAME' if stride == 1 else 'VALID'\n x = layers.Conv2D(filters, kernel_size, strides=stride, padding=padding,\n dilation_rate=dilation, use_bias=False, name=name + '_2_conv')(x)\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\n name=name + '_2_bn')(x)\n x = layers.Activation('relu', name=name + '_2_relu')(x)\n\n x = layers.Conv2D(4 * filters, 1, use_bias=False, name=name + '_3_conv')(x)\n x = layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5,\n name=name + '_3_bn')(x)\n\n x = layers.Add(name=name + '_add')([shortcut, x])\n x = layers.Activation('relu', name=name + '_out')(x)\n return x",
"def __init__(self, in_channels, out_channels, kernel_size,\n stride=1, padding=0, dilation=1, groups=1, bias=True):\n super(BatchConv2D_5D, self).__init__()\n self.out_channels = out_channels\n self.conv = nn.Conv2d(in_channels*groups, out_channels*groups,\n kernel_size, stride=stride,\n padding=padding, dilation=dilation,\n groups=groups, bias=bias)",
"def block3(\n x,\n filters,\n kernel_size=3,\n stride=1,\n groups=32,\n conv_shortcut=True,\n name='',\n norm_use=\"bn\",\n):\n if conv_shortcut is True:\n shortcut = layers.Conv2D(\n (64 // groups) * filters,\n 1,\n strides=stride,\n use_bias=False,\n name=name + '_0_conv',\n )(x)\n shortcut = normalize_layer(shortcut, norm_use=norm_use, name=name + '_0_')\n else:\n shortcut = x\n\n x = layers.Conv2D(\n filters,\n 1,\n use_bias=False,\n name=name + '_1_conv',\n kernel_initializer='he_normal',\n )(x)\n x = normalize_layer(x, norm_use=norm_use, name=name + '_1_')\n x = layers.Activation('relu', name=name + '_1_relu')(x)\n\n c = filters // groups\n x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)\n x = layers.DepthwiseConv2D(\n kernel_size,\n strides=stride,\n depth_multiplier=c,\n use_bias=False,\n name=name + '_2_conv',\n kernel_initializer='he_normal',\n )(x)\n x_shape = backend.int_shape(x)[1:-1]\n x = layers.Reshape(x_shape + (groups, c, c))(x)\n output_shape = x_shape + (groups, c) if backend.backend() == 'theano' else None\n x = layers.Lambda(\n lambda x: sum([x[:, :, :, :, i] for i in range(c)]),\n output_shape=output_shape,\n name=name + '_2_reduce',\n )(x)\n x = layers.Reshape(x_shape + (filters, ))(x)\n x = normalize_layer(x, norm_use=norm_use, name=name + '_2_')\n x = layers.Activation('relu', name=name + '_2_relu')(x)\n\n x = layers.Conv2D((64 // groups) * filters, 1, kernel_initializer='he_normal',\n use_bias=False, name=name + '_3_conv')(x)\n x = normalize_layer(x, norm_use=norm_use, name=name + '_3_')\n\n x = layers.Add(name=name + '_add')([shortcut, x])\n x = layers.Activation('relu', name=name + '_out')(x)\n return x",
"def conv_block(input_tensor, kernel_size, filters, strides=(2, 2)):\n\n filters1, filters2, filters3 = filters\n\n if backend.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n\n x = layers.Conv2D(filters1, (1, 1), use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(input_tensor)\n x = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(filters2, kernel_size, strides=strides, padding='same',\n use_bias=False, kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)\n x = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(x)\n x = layers.Activation('relu')(x)\n\n x = layers.Conv2D(filters3, (1, 1), use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(x)\n x = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(x)\n\n shortcut = layers.Conv2D(filters3, (1, 1), strides=strides, use_bias=False,\n kernel_initializer='he_normal',\n kernel_regularizer=regularizers.l2(L2_WEIGHT_DECAY))(input_tensor)\n shortcut = layers.BatchNormalization(axis=bn_axis,\n momentum=BATCH_NORM_DECAY,\n epsilon=BATCH_NORM_EPSILON)(shortcut)\n\n x = layers.add([x, shortcut])\n x = layers.Activation('relu')(x)\n return x",
"def conv_internal(conv_fn, inputs, filters, kernel_size, **kwargs):\n static_shape = inputs.get_shape()\n if not static_shape or len(static_shape) != 4:\n raise ValueError(\"Inputs to conv must have statically known rank 4. \"\n \"Shape: \" + str(static_shape))\n # Add support for left padding.\n if kwargs.get(\"padding\") == \"LEFT\":\n dilation_rate = (1, 1)\n if \"dilation_rate\" in kwargs:\n dilation_rate = kwargs[\"dilation_rate\"]\n assert kernel_size[0] % 2 == 1 and kernel_size[1] % 2 == 1\n height_padding = 2 * (kernel_size[0] // 2) * dilation_rate[0]\n cond_padding = tf.cond(\n tf.equal(shape_list(inputs)[2], 1), lambda: tf.constant(0),\n lambda: tf.constant(2 * (kernel_size[1] // 2) * dilation_rate[1]))\n width_padding = 0 if static_shape[2] == 1 else cond_padding\n padding = [[0, 0], [height_padding, 0], [width_padding, 0], [0, 0]]\n inputs = tf.pad(inputs, padding)\n # Set middle two dimensions to None to prevent convolution from complaining\n inputs.set_shape([static_shape[0], None, None, static_shape[3]])\n kwargs[\"padding\"] = \"VALID\"\n\n def conv2d_kernel(kernel_size_arg, name_suffix):\n \"\"\"Call conv2d but add suffix to name.\"\"\"\n name = \"{}_{}\".format(kwargs.get(\"name\", \"conv\"), name_suffix)\n original_name = kwargs.pop(\"name\", None)\n original_force2d = kwargs.pop(\"force2d\", None)\n result = conv_fn(inputs, filters, kernel_size_arg, name=name, **kwargs)\n if original_name is not None:\n kwargs[\"name\"] = original_name # Restore for other calls.\n if original_force2d is not None:\n kwargs[\"force2d\"] = original_force2d\n return result\n\n return conv2d_kernel(kernel_size, \"single\")",
"def up_conv_2d(input_tensor, nb_filters, name):\n resize = UpSampling2D(size=(2, 2), interpolation='nearest')(input_tensor)\n paddings = tf.constant([[0, 0], [1, 1], [1, 1], [0, 0]])\n resize = tf.pad(resize, paddings, \"SYMMETRIC\")\n output_layer = Conv2D(\n filters=nb_filters,\n kernel_size=(3, 3),\n activation='relu',\n name=name)(\n resize)\n\n return output_layer",
"def _convk(\n in_channels, out_channels, kernel_size=3, stride=1, groups=1, dilation=1, bias=False\n):\n padding = dilation * (kernel_size - 1) // 2\n return Conv1d(\n in_channels,\n out_channels,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n groups=groups,\n bias=bias,\n dilation=dilation,\n )",
"def conv4d(data,filters,bias=None,permute_filters=True,use_half=False):\n b,c,h,w,d,t=data.size()\n\n data=data.permute(2,0,1,3,4,5).contiguous() # permute to avoid making contiguous inside loop \n \n # Same permutation is done with filters, unless already provided with permutation\n if permute_filters:\n filters=filters.permute(2,0,1,3,4,5).contiguous() # permute to avoid making contiguous inside loop \n\n c_out=filters.size(1)\n if use_half:\n output = Variable(torch.HalfTensor(h,b,c_out,w,d,t),requires_grad=data.requires_grad)\n else:\n output = Variable(torch.zeros(h,b,c_out,w,d,t),requires_grad=data.requires_grad)\n \n padding=filters.size(0)//2\n if use_half:\n Z=Variable(torch.zeros(padding,b,c,w,d,t).half())\n else:\n Z=Variable(torch.zeros(padding,b,c,w,d,t))\n \n if data.is_cuda:\n Z=Z.cuda(data.get_device()) \n output=output.cuda(data.get_device())\n \n data_padded = torch.cat((Z,data,Z),0)\n \n\n for i in range(output.size(0)): # loop on first feature dimension\n # convolve with center channel of filter (at position=padding)\n output[i,:,:,:,:,:]=F.conv3d(data_padded[i+padding,:,:,:,:,:], \n filters[padding,:,:,:,:,:], bias=bias, stride=1, padding=padding)\n # convolve with upper/lower channels of filter (at postions [:padding] [padding+1:])\n for p in range(1,padding+1):\n output[i,:,:,:,:,:]=output[i,:,:,:,:,:]+F.conv3d(data_padded[i+padding-p,:,:,:,:,:], \n filters[padding-p,:,:,:,:,:], bias=None, stride=1, padding=padding)\n output[i,:,:,:,:,:]=output[i,:,:,:,:,:]+F.conv3d(data_padded[i+padding+p,:,:,:,:,:], \n filters[padding+p,:,:,:,:,:], bias=None, stride=1, padding=padding)\n\n output=output.permute(1,2,0,3,4,5).contiguous()\n return output",
"def test_matched_filter5():\n x_size = 80\n y_size = 90\n\n objects = numpy.zeros((1, 5))\n\n # Make filter with unit sum.\n objects[0,:] = [x_size/2, y_size/2, 1.0, 1.0, 1.0]\n psf = dg.drawGaussians((x_size, y_size), objects)\n psf = psf/numpy.sum(psf)\n flt = matchedFilterC.MatchedFilter(psf)\n\n # Make test image.\n image = numpy.zeros((x_size, y_size))\n image[int(x_size/2), int(y_size/2)] = float(100)\n\n mf_conv = flt.convolve(image)\n\n t1 = numpy.fft.fft2(recenterPSF.recenterPSF(psf))\n t2 = numpy.fft.fft2(image)\n np_conv = numpy.real(numpy.fft.ifft2(t1*t2))\n\n assert(numpy.allclose(mf_conv, np_conv))\n\n flt.cleanup()",
"def cifar10_5layers(input_image, keep_prob, init_method=tf.truncated_normal_initializer(stddev=1e-2)):\n with tf.variable_scope(\"conv1\"):\n W1 = tf.get_variable(name=\"W1\", shape=[5,5,3,32], dtype=tf.float32, \\\n initializer=init_method)\n b1 = tf.get_variable(name=\"b1\", shape=[32], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n conv1 = conv_pool_relu(input_image, W1, b1)\n with tf.variable_scope(\"conv2\"):\n W2 = tf.get_variable(name=\"W2\", shape=[5,5,32,64], dtype=tf.float32, \\\n initializer=init_method)\n b2 = tf.get_variable(name=\"b2\", shape=[64], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n conv2 = conv_pool_relu(conv1, W2, b2)\n\tconv2 = tf.nn.dropout(conv2, keep_prob)\n with tf.variable_scope(\"conv3\"):\n W3 = tf.get_variable(name=\"W3\", shape=[5,5,64,128], dtype=tf.float32, \\\n initializer=init_method)\n b3 = tf.get_variable(name=\"b3\", shape=[128], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n conv3 = conv_pool_relu(conv2, W3, b3)\n\tconv3 = tf.nn.dropout(conv3, keep_prob)\n with tf.variable_scope(\"fc1\"):\n W4 = tf.get_variable(name=\"W4\", shape=[4*4*128,256], dtype=tf.float32, \\\n initializer=init_method)\n b4 = tf.get_variable(name=\"b4\", shape=[256], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n conv3_flat = tf.reshape(conv3, [-1, 4*4*128])\n fc1 = fc_relu(conv3_flat, W4, b4)\n\tfc1 = tf.nn.dropout(fc1, keep_prob)\n with tf.variable_scope(\"output\"):\n W5 = tf.get_variable(name=\"W5\", shape=[256,10], dtype=tf.float32, \\\n initializer=init_method)\n b5 = tf.get_variable(name=\"b5\", shape=[10], dtype=tf.float32, \\\n initializer=tf.constant_initializer(0.01))\n\ty_logit = tf.matmul(fc1, W5) + b5\n return y_logit, tf.nn.softmax(y_logit, name=\"softmax\")",
"def test_on_conv_transpose_2d_dilation_padding_same(self):\n\n # Channels/Colors, #filters, filter_size (square)\n conv_filter = objax.nn.ConvTranspose2D(1, 1, 2, dilations=2, padding=objax.ConvPadding.SAME)\n weights = objax.TrainVar(jn.array([[[[1., 2.], [3., 4.]]]]).transpose((2, 3, 1, 0)))\n conv_filter.w = weights\n image = jn.array([[[[2., 1., 3., 4.], [5., 6., 7., 8.],\n [9., 10., 11., 12.], [13., 14., 15., 16.]]]])\n # NCHW: Batch, Channels/Colors, Height, Width\n features = conv_filter(image)\n expected_features = jn.array([[[[6., 17., 20., 14.],\n [13., 46., 48., 34.],\n [32., 82., 92., 58.],\n [30., 69., 76., 44.]]]])\n self.assertEqual(features.shape, (1, 1, 4, 4))\n self.assertTrue(jn.array_equal(features, expected_features))",
"def test_deconv():\n\n # filter params\n R, S = 5, 5\n fshape = (R, S, 1)\n strides = 2\n filter_val_nz = np.arange(1, R * S + 1).reshape(R, S)\n filter_val = np.zeros(fshape)\n filter_val[:, :, 0] = filter_val_nz\n\n deconv = Deconvolution(fshape,\n filter_init=ConstantInit(filter_val),\n strides=strides,\n padding=0,\n dilation=1)\n\n N = ng.make_axis(name='N', length=1) # batch\n image_shape = (1, 8, 8) # CHW\n image_axes = ng.make_axes([ng.make_axis(name=nm, length=l)\n for nm, l in zip('CHW', image_shape)])\n image_axes |= N\n image = ng.placeholder(axes=image_axes)\n\n output = deconv(image)\n\n with closing(ngt.make_transformer()) as transformer:\n comp = transformer.add_computation(ng.computation(output, image))\n input_val = np.zeros(image_shape + (N.length, ), dtype=float)\n input_val[0, 0, 0] = 1\n input_val[0, 5, 5] = 1\n input_val[0, 7, 7] = 1\n result = comp(input_val)\n feature_map = np.squeeze(result)\n\n assert (feature_map[:5, :5] == filter_val_nz).all()\n\n result2 = filter_val_nz.copy()\n result2[-1, -1] = 26\n assert (feature_map[10:15, 10:15] == result2).all()\n\n result3 = filter_val_nz.copy()\n result3[0, 0] = 26\n assert (feature_map[-5:, -5:] == result3).all()",
"def block1(\n x,\n filters,\n kernel_size=3,\n stride=1,\n conv_shortcut=True,\n name='',\n norm_use=\"bn\"\n):\n if conv_shortcut is True:\n shortcut = layers.Conv2D(\n 4 * filters,\n 1,\n strides=stride,\n kernel_initializer='he_normal',\n name=name + '_0_conv',\n )(x)\n shortcut = normalize_layer(\n shortcut,\n norm_use=norm_use,\n name=name + '_0_',\n )\n else:\n shortcut = x\n\n x = layers.Conv2D(\n filters,\n 1,\n strides=stride,\n name=name + '_1_conv',\n kernel_initializer='he_normal',\n )(x)\n x = normalize_layer(x, norm_use=norm_use, name=name + '_1_')\n x = layers.Activation('relu', name=name + '_1_relu')(x)\n\n x = layers.Conv2D(\n filters,\n kernel_size,\n padding='SAME',\n kernel_initializer='he_normal',\n name=name + '_2_conv'\n )(x)\n x = normalize_layer(x, norm_use=norm_use, name=name+'_2_')\n x = layers.Activation('relu', name=name + '_2_relu')(x)\n\n x = layers.Conv2D(\n 4 * filters, 1,\n name=name + '_3_conv',\n kernel_initializer='he_normal'\n )(x)\n x = normalize_layer(x, norm_use=norm_use, name=name + '_3_')\n\n x = layers.Add(name=name + '_add')([shortcut, x])\n x = layers.Activation('relu', name=name + '_out')(x)\n return x",
"def __call__(self, inputs, output_stages='c5', **kwargs):\n if backend.image_data_format() == 'channels_last':\n bn_axis = 3\n else:\n bn_axis = 1\n\n dilation = self.dilation\n\n x = layers.ZeroPadding2D(padding=(3, 3), name='conv1_pad')(inputs)\n x = layers.Conv2D(64, (7, 7),\n strides=(2, 2),\n padding='valid',\n kernel_initializer='he_normal',\n name='conv1')(x)\n x = layers.BatchNormalization(axis=bn_axis, name='bn_conv1')(x)\n x = layers.Activation('relu')(x)\n x = layers.ZeroPadding2D(padding=(1, 1), name='pool1_pad')(x)\n x = layers.MaxPooling2D((3, 3), strides=(2, 2))(x)\n c1 = x\n\n x = self._conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))\n for i in range(self.params[0]):\n x = self._identity_block(x, 3, [64, 64, 256], stage=2, block=chr(ord('b') + i))\n c2 = x\n\n x = self._conv_block(x, 3, [128, 128, 512], stage=3, block='a')\n for i in range(self.params[1]):\n x = self._identity_block(x, 3, [128, 128, 512], stage=3, block=chr(ord('b') + i))\n c3 = x\n\n x = self._conv_block(x, 3, [256, 256, 1024], stage=4, block='a', dilation=dilation[0])\n for i in range(self.params[2]):\n x = self._identity_block(x, 3, [256, 256, 1024], stage=4, block=chr(ord('b') + i), dilation=dilation[0])\n c4 = x\n\n x = self._conv_block(x, 3, [512, 512, 2048], stage=5, block='a', dilation=dilation[1])\n for i in range(self.params[3]):\n x = self._identity_block(x, 3, [512, 512, 2048], stage=5, block=chr(ord('b') + i), dilation=dilation[1])\n c5 = x\n\n self.outputs = {'c1': c1,\n 'c2': c2,\n 'c3': c3,\n 'c4': c4,\n 'c5': c5}\n\n if type(output_stages) is not list:\n return self.outputs[output_stages]\n else:\n return [self.outputs[ci] for ci in output_stages]",
"def testMask5D(self):\n mask = np.ones((3, 3, 3, 5, 1), dtype=np.float32)\n inputs = tf.constant(1.0, shape=(5, 5, 5, 5, 5))\n conv1 = snt.Conv3D(\n output_channels=1,\n kernel_shape=3,\n mask=mask,\n padding=snt.VALID,\n use_bias=False,\n initializers=create_constant_initializers(1.0, 0.0, use_bias=False))\n out = conv1(inputs)\n expected_out = 135 * np.ones((5, 3, 3, 3, 1), dtype=np.float32)\n with self.test_session():\n tf.variables_initializer([conv1.w]).run()\n self.assertAllClose(out.eval(), expected_out)",
"def test_Pad3D5():\n input_shape = (1, 1, 2, 3)\n # pad = np.array([1, 0, 1, 2]).astype('int32')\n pad = [1, 0, 1, 2]\n mode = \"constant\"\n res = [[[[0, 0, 0, 0], [0, 1, 2, 3], [0, 4, 5, 6], [0, 0, 0, 0], [0, 0, 0, 0]]]]\n data = np.arange(np.prod(input_shape)).reshape(input_shape) + 1\n obj.run(res=res, padding=pad, mode=mode, data_format=\"NCHW\", data=data)",
"def deform_conv2d(x,\n offset,\n mask,\n num_filters,\n filter_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n deformable_groups=1,\n im2col_step=1,\n weight_attr=None,\n bias_attr=None,\n name=None):\n\n if mask is None:\n return paddle.fluid.layers.deformable_conv(\n input=x,\n offset=offset,\n mask=mask,\n num_filters=num_filters,\n filter_size=filter_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n deformable_groups=deformable_groups,\n im2col_step=im2col_step,\n param_attr=weight_attr,\n bias_attr=bias_attr,\n modulated=False,\n name=name)\n else:\n return paddle.fluid.layers.deformable_conv(\n input=x,\n offset=offset,\n mask=mask,\n num_filters=num_filters,\n filter_size=filter_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n deformable_groups=deformable_groups,\n im2col_step=im2col_step,\n param_attr=weight_attr,\n bias_attr=bias_attr,\n modulated=True,\n name=name)",
"def conv(in_planes,\n out_planes,\n kernel_size=3,\n stride=1,\n padding=1,\n dilation=1,\n groups=1):\n return nn.Conv2d(in_planes,\n out_planes,\n kernel_size=kernel_size,\n stride=stride,\n padding=padding,\n dilation=dilation,\n groups=groups,\n bias=False)",
"def Unet4(shape, nb_filters=32, exp=1, kernel_size=3, initialization=\"glorot_uniform\", activation=\"relu\", sigma_noise=0, output_channels=1, drop=0.0, regularization=None):\n \n \n input_layer = Input(shape=shape)\n\n conv1 = ConvBlock(input_layer, nb_filters=nb_filters, kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n if drop > 0.0: pool1 = Dropout(drop)(pool1)\n\n conv2 = ConvBlock(pool1, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n if drop > 0.0: pool2 = Dropout(drop)(pool2)\n\n conv3 = ConvBlock(pool2, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n if drop > 0.0: pool3 = Dropout(drop)(pool3)\n\n conv4 = ConvBlock(pool3, nb_filters=nb_filters * 2 **(3 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n if drop > 0.0: pool4 = Dropout(drop)(pool4)\n\n deconv5 = DeconvBlock(conv4, residual=conv3, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv5 = Dropout(drop)(deconv5)\n\n deconv6 = DeconvBlock(deconv5, residual=conv2, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv6 = Dropout(drop)(deconv6)\n\n deconv7 = DeconvBlock(deconv6, residual=conv1, nb_filters=nb_filters, kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv7 = Dropout(drop)(deconv7)\n\n if sigma_noise > 0:\n deconv7 = GaussianNoise(sigma_noise)(deconv7)\n\n output_layer = Conv2D(filters=output_channels, kernel_size=(1, 1))(deconv7)\n output_layer = BatchNormalization()(output_layer)\n output_layer = Activation('softmax')(output_layer)\n\n model = Model(inputs=input_layer, outputs=output_layer, name='Unet')\n return model",
"def test_on_conv_transpose_2d_dilation_padding_valid(self):\n\n # Channels/Colors, #filters, filter_size (square)\n conv_filter = objax.nn.ConvTranspose2D(1, 1, 2, dilations=2, padding=objax.ConvPadding.VALID)\n weights = objax.TrainVar(jn.array([[[[1., 2.], [3., 4.]]]]).transpose((2, 3, 1, 0)))\n conv_filter.w = weights\n image = jn.array([[[[2., 1., 3., 4.], [5., 6., 7., 8.],\n [9., 10., 11., 12.], [13., 14., 15., 16.]]]])\n # NCHW: Batch, Channels/Colors, Height, Width\n features = conv_filter(image)\n expected_features = jn.array([[[[2., 1., 7., 6., 6., 8.],\n [5., 6., 17., 20., 14., 16.],\n [15., 13., 46., 48., 34., 40.],\n [28., 32., 82., 92., 58., 64.],\n [27., 30., 69., 76., 44., 48.],\n [39., 42., 97., 104., 60., 64.]]]])\n self.assertEqual(features.shape, (1, 1, 6, 6))\n self.assertTrue(jn.array_equal(features, expected_features))",
"def schedule_conv2d_NCHWc(num_filter, kernel_size, stride, padding, outs):\n s = tvm.create_schedule([x.op for x in outs])\n\n def traverse(op):\n \"\"\"Traverse operators from computation graph\"\"\"\n # inline all one-to-one-mapping operators except the last stage (output)\n if tag.is_broadcast(op.tag):\n if op not in s.outputs:\n s[op].compute_inline()\n for tensor in op.input_tensors:\n if tensor.op.input_tensors:\n traverse(tensor.op)\n\n if 'conv2d_NCHWc' in op.tag:\n conv_out = op.output(0)\n kernel = conv_out.op.input_tensors[1]\n data_vec = conv_out.op.input_tensors[0]\n data = data_vec.op.input_tensors[0] \\\n if isinstance(data_vec.op, tvm.tensor.ComputeOp) and \"pad\" not in data_vec.op.tag \\\n else data_vec\n if isinstance(data.op, tvm.tensor.ComputeOp) and \"pad\" in data.op.tag:\n data_pad = data\n data = data_pad.op.input_tensors[0]\n\n n, ic_chunk, h, w, ic_block = [x.value for x in data.shape]\n ic = ic_chunk * ic_block\n original_data = tvm.placeholder((n, ic, h, w), dtype=conv_out.dtype)\n\n kh, kw = kernel_size\n original_kernel = tvm.placeholder((num_filter, ic, kh, kw), dtype=conv_out.dtype)\n\n wkl = _get_workload(original_data, original_kernel, stride, padding, conv_out.dtype)\n sch = _get_schedule(wkl)\n _SCH_TO_SCH_FUNC[type(sch)](s, wkl, data_vec,\n kernel, conv_out, outs[0])\n\n traverse(outs[0].op)\n return s",
"def expanding_block(n_filters, pool_size, kernel_size=(2, 2, 2),\r\n strides=(2, 2, 2),\r\n deconvolution=False):\r\n if deconvolution:\r\n return Deconvolution3D(filters=n_filters, kernel_size=kernel_size,\r\n strides=strides)\r\n else:\r\n return UpSampling3D(size=pool_size)",
"def vvc_filters_2d(kernel_size):\n vvc_filters = []\n half_kernel = (kernel_size - 8) // 2\n for frac_pos in frac_positions():\n filter_x = filter_coefficients(int(frac_pos.split(\",\")[0]))\n filter_y = filter_coefficients(int(frac_pos.split(\",\")[1]))\n\n filter_vvc = np.tile(filter_x, 8).reshape((8, 8))\n for index in range(len(filter_y)):\n filter_vvc[index, :] *= filter_y[index]\n filter_vvc = filter_vvc / (64 * 64)\n\n vvc_filters.append(np.pad(filter_vvc, ((half_kernel + 1, half_kernel), (half_kernel + 1, half_kernel)),\n 'constant', constant_values=0))\n return vvc_filters",
"def calc_conv_out_dims(X_shape, W_shape, stride=1, pad=0, dilation=0):\n\tdummy = np.zeros(X_shape)\n\ts, p, d = stride, pad, dilation\n\tif len(X_shape) == 3:\n\t\t_, p = pad1D(dummy, p)\n\t\tpw1, pw2 = p\n\t\tfw, in_ch, out_ch = W_shape\n\t\tn_ex, in_length, in_ch = X_shape\n\n\t\t_fw = fw * (d+1) - d\n\t\tout_length = (in_length + pw1 + pw2 - _fw) // s + 1\n\t\tout_dim = (n_ex, out_length, out_ch)\n\n\telif len(X_shape) == 4:\n\t\t_, p = pad2D(dummy, p)\n\t\tpr1, pr2, pc1, pc2 = p\n\t\tfr, fc, in_ch, out_ch = W_shape\n\t\tn_ex, in_rows, in_cols, in_ch = X_shape\n\n\t\t# adjust effective filter size to account for dilation\n\t\t_fr, _fc = fr * (d+1) - d, fc * (d+1) - d\n\t\tout_rows = (in_rows + pr1 + pr2 - _fr) // s + 1\n\t\tout_cols = (in_cols + pc1 + pc2 - _fc) // s + 1\n\t\tout_dims = (n_ex, out_rows, out_cols, out_ch)\n\telse:\n\t\traise ValueError(\"unrecognized number of the input dims: {}\".format(len(X_shape)))",
"def get_unet1(patch_height, patch_width, channels, n_classes):\n axis = 3\n k = 3 # kernel size\n s = 2 # stride\n n_filters = 32 # number of filters 通道数\n\n #初始化keras张量\n inputs = Input((patch_height, patch_width, channels))\n\n # n_filters:输出的维度 (k,k):卷积核尺寸 padding:边缘填充\n # 400,400,3 ==> 400,400,32\n conv1 = Conv2D(n_filters, (k, k), padding='same')(inputs)\n conv1 = BatchNormalization(scale=False, axis=axis)(conv1)\n conv1 = Activation('relu')(conv1)\n conv1 = Conv2D(n_filters, (k, k), padding='same')(conv1)\n conv1 = BatchNormalization(scale=False, axis=axis)(conv1)\n conv1 = Activation('relu')(conv1)\n # 400,400,32 ==> 200,200,32\n pool1 = MaxPooling2D(pool_size=(s, s))(conv1)\n\n # 200,200,32 ==> 200,200,32\n conv2 = Conv2D(n_filters, (k, k), padding='same')(pool1)\n conv2 = BatchNormalization(scale=False, axis=axis)(conv2)\n conv2 = Activation('relu')(conv2)\n conv2 = Conv2D(n_filters, (k, k), padding='same')(conv2)\n conv2 = BatchNormalization(scale=False, axis=axis)(conv2)\n conv2 = add([pool1, conv2])\n conv2 = Activation('relu')(conv2)\n x2 = Conv2D(2 * n_filters, (k, k), padding='same')(conv2)\n # 200,200,32 ==> 100,100,64\n output2 = Conv2D(n_filters * 2, (1,1),padding='same',strides=s)(conv2)\n\n # 200,200,32 ==> 100,100,64\n conv3 = Conv2D(2 * n_filters, (k, k), padding='same',strides=s)(conv2)\n conv3 = BatchNormalization(scale=False, axis=axis)(conv3)\n conv3 = Activation('relu')(conv3)\n # 100,100,64 ==> 100,100,64\n conv3 = Conv2D(2 * n_filters, (k, k), padding='same')(conv3)\n conv3 = BatchNormalization(scale=False, axis=axis)(conv3)\n conv3 = add([output2,conv3])\n conv3 = Activation('relu')(conv3)\n x3 = Conv2D(4 * n_filters, (k, k), padding='same')(conv3)\n # 100,100,64 ==> 50,50,128\n output3 = Conv2D(n_filters * 4,(1,1),padding='same',strides=s)(conv3)\n\n # 100,100,64 ==> 50,50,128\n conv4 = Conv2D(4 * n_filters, (k, k), padding='same',strides=s)(conv3)\n conv4 = BatchNormalization(scale=False, axis=axis)(conv4)\n conv4 = Activation('relu')(conv4)\n conv4 = Conv2D(4 * n_filters, (k, k), padding='same')(conv4)\n conv4 = BatchNormalization(scale=False, axis=axis)(conv4)\n conv4 = add([output3,conv4])\n conv4 = Activation('relu')(conv4)\n # x4 == 50,50,256\n x4 = Conv2D(8 * n_filters,(k,k),padding='same')(conv4)\n # 50,50,128 ==> 25,25,256\n output4 = Conv2D(8 * n_filters,(1,1),padding='same',strides=s)(conv4)\n\n # 50,50,128 ==> 25,25,256\n conv5 = Conv2D(8 * n_filters, (k, k), padding='same',strides=s)(conv4)\n conv5 = BatchNormalization(scale=False, axis=axis)(conv5)\n conv5 = Activation('relu')(conv5)\n conv5 = Conv2D(8 * n_filters, (k, k), padding='same')(conv5)\n conv5 = BatchNormalization(scale=False, axis=axis)(conv5)\n conv5 = add([output4,conv5])\n conv5 = Activation('relu')(conv5)\n # 25,25,256 ==> 25,25,512\n output5 = Conv2D(16 * n_filters,(1,1),padding='same')(conv5)\n\n # 先上采样放大 在进行卷积操作 相当于转置卷积 并进行拼接\n # 25,25,512 ==> 50,50,768\n up1 = Concatenate(axis=axis)([UpSampling2D(size=(s, s))(output5), x4])\n conv6 = Conv2D(8 * n_filters, (k, k), padding='same')(up1)\n conv6 = BatchNormalization(scale=False, axis=axis)(conv6)\n conv6 = Activation('relu')(conv6)\n conv6 = Conv2D(8 * n_filters, (k, k), padding='same')(conv6)\n conv6 = BatchNormalization(scale=False, axis=axis)(conv6)\n conv6 = Activation('relu')(conv6)\n\n # 50,50,768 ==> 100,100,896\n up2 = Concatenate(axis=axis)([UpSampling2D(size=(s, s))(conv6), x3])\n conv7 = Conv2D(4 * n_filters, (k, k), padding='same')(up2)\n conv7 = BatchNormalization(scale=False, axis=axis)(conv7)\n conv7 = 
Activation('relu')(conv7)\n conv7 = Conv2D(4 * n_filters, (k, k), padding='same')(conv7)\n conv7 = BatchNormalization(scale=False, axis=axis)(conv7)\n conv7 = Activation('relu')(conv7)\n\n # 100,100,896 ==> 200,200,960\n up3 = Concatenate(axis=axis)([UpSampling2D(size=(s, s))(conv7), x2])\n conv8 = Conv2D(2 * n_filters, (k, k), padding='same')(up3)\n conv8 = BatchNormalization(scale=False, axis=axis)(conv8)\n conv8 = Activation('relu')(conv8)\n conv8 = Conv2D(2 * n_filters, (k, k), padding='same')(conv8)\n conv8 = BatchNormalization(scale=False, axis=axis)(conv8)\n conv8 = Activation('relu')(conv8)\n\n # 200,200,960 ==> 400,400,992\n up4 = Concatenate(axis=axis)([UpSampling2D(size=(s, s))(conv8), conv1])\n conv9 = Conv2D(n_filters, (k, k), padding='same')(up4)\n conv9 = BatchNormalization(scale=False, axis=axis)(conv9)\n conv9 = Activation('relu')(conv9)\n conv9 = Conv2D(n_filters, (k, k), padding='same')(conv9)\n conv9 = BatchNormalization(scale=False, axis=axis)(conv9)\n conv9 = Activation('relu')(conv9)\n\n # 全连接层 400,400,992 ==> 400,400,5\n outputs = Conv2D(n_classes, (1, 1), padding='same', activation='softmax')(conv9)\n\n unet = Model(inputs=inputs, outputs=outputs)\n return unet",
"def _get_same_padding_conv_nd(\n image_size: list[int], kernel_size: tuple[int, ...], dilation: tuple[int, ...], stride: tuple[int, ...]\n) -> list[int]:\n # get number of spatial dimensions, corresponds to kernel size length\n num_dims = len(kernel_size)\n\n # additional checks to populate dilation and stride (in case they are single entry tuples)\n if len(dilation) == 1:\n dilation = dilation * num_dims\n\n if len(stride) == 1:\n stride = stride * num_dims\n\n # equation to calculate (pad^+ + pad^-) size\n _pad_size: list[int] = [\n max((math.ceil(_i_s / _s) - 1) * _s + (_k_s - 1) * _d + 1 - _i_s, 0)\n for _i_s, _k_s, _d, _s in zip(image_size, kernel_size, dilation, stride)\n ]\n # distribute paddings into pad^+ and pad^- following Tensorflow's same padding strategy\n _paddings: list[tuple[int, int]] = [(_p // 2, _p - _p // 2) for _p in _pad_size]\n\n # unroll list of tuples to tuples, and then to list\n # reversed as nn.ConstantPadNd expects paddings starting with last dimension\n _paddings_ret: list[int] = [outer for inner in reversed(_paddings) for outer in inner]\n return _paddings_ret"
] | [
"0.8039668",
"0.7764517",
"0.73641586",
"0.6590056",
"0.65151554",
"0.6472721",
"0.6244569",
"0.6239202",
"0.61814755",
"0.6160284",
"0.6126702",
"0.61187077",
"0.6105487",
"0.6101732",
"0.60931444",
"0.6072241",
"0.6063233",
"0.60537577",
"0.60482925",
"0.6038803",
"0.6030432",
"0.5996218",
"0.59803474",
"0.59495187",
"0.594916",
"0.5946113",
"0.59278244",
"0.5915654",
"0.59024596",
"0.5869556"
] | 0.80006856 | 1 |
build a stack of blocks | def _make_stack(self, block, num_layers, inplanes, outplanes, kernel_size=3,
SE=False, expansion=3, stride=1):
norm_layer = self._norm_layer
act_layer = self._act_layer
downsample = None
    # downsample the skip connection with a 1x1 conv + norm when stride != 1,
    # or when the block's input planes differ from its output planes
    # (only possible for the first block in the stack)
if stride != 1 or inplanes != outplanes:
downsample = nn.Sequential(
conv1x1(inplanes, outplanes, stride=stride),
norm_layer(outplanes)
)
layers = []
# first block in stack can have stride > 1
layers.append(block(inplanes, outplanes, expansion=expansion, kernel_size=kernel_size,
SE=SE, stride=stride, dropout=self._dropout, downsample=downsample,
norm_layer=norm_layer, act_layer=act_layer))
    # remaining blocks in the stack:
    # each uses inplanes = outplanes, stride=1, and no downsample
for _ in range(1, num_layers):
layers.append(block(outplanes, outplanes, expansion=expansion, kernel_size=kernel_size,
SE=SE, stride=1, dropout=self._dropout, norm_layer=norm_layer,
act_layer=act_layer))
return nn.Sequential(*layers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _make_stack(self, block, planes, blocks, stride=1, dilate=False):\n\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n\n # use dilation instead of striding if true\n if dilate:\n self.dilation *= stride\n stride = 1\n\n # apply conv-1x1 to input identity if stride > 1 or output channels != input channels for dim. matching\n if stride != 1 or self.inplanes != planes * block.expansion:\n downsample = nn.Sequential(\n conv1x1(self.inplanes, planes * block.expansion, stride),\n norm_layer(planes * block.expansion)\n )\n\n layers = []\n # first layer\n # input = batch_size x self.inplanes x H x H\n # output = batch_size x planes * block.expansion x H/stride x H/stride\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation, norm_layer))\n self.inplanes = planes * block.expansion\n # subsequent layers\n for _ in range(1, blocks):\n # input = output = batch_size x planes * block.expansion x H' x H'\n layers.append(block(self.inplanes, planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation,\n norm_layer=norm_layer))\n\n return nn.Sequential(*layers)",
"def build(self, block_size):",
"def build_nested_blocks(self):\n pass",
"def __init__(self, total_length:int, initial_y:int, screen, number:int, debug:bool = False):\n\n #Call the superclass\n super().__init__()\n\n #Create the group of blocks based on x and y and add them to the group\n for k in range(number):\n for i in range(-1,2):\n for j in range(-2,3):\n self.add(Block(total_length * (k+1) // (number+1) + 10*j, initial_y + 10*i, screen, debug = debug))",
"def collect_blocks():\n\n # Below are the position of (c,r) in a block.\n\n #########################\n # (0,0) # (1,0) # (2,0) #\n #########################\n #########################\n # (0,1) # (1,1) # (2,1) #\n #########################\n #########################\n # (0,2) # (1,2) # (2,2) #\n #########################\n\n for x in range(72):\n r, c = x // 9 % 3, x % 3\n if r == 0:\n if c == 0:\n yield x, x + 10\n yield x, x + 11\n yield x, x + 19\n yield x, x + 20\n elif c == 1:\n yield x, x + 8\n yield x, x + 10\n yield x, x + 17\n yield x, x + 19\n else:\n yield x, x + 7\n yield x, x + 8\n yield x, x + 16\n yield x, x + 17\n elif r == 1:\n if c == 0:\n yield x, x + 10\n yield x, x + 11\n elif c == 1:\n yield x, x + 8\n yield x, x + 10\n else:\n yield x, x + 8\n yield x, x + 7",
"def stack3(x, filters, blocks, freeze_bn=True, stride1=2, groups=32, name=None):\n x = block3(x, filters, freeze_bn, stride=stride1, groups=groups, name=name + '_block1')\n for i in range(2, blocks + 1):\n x = block3(x, filters, freeze_bn, groups=groups, conv_shortcut=False,\n name=name + '_block' + str(i))\n return x",
"def stack1(x, filters, blocks, stride1=2, name='', norm_use=\"bn\"):\n x = block1(\n x=x,\n filters=filters,\n stride=stride1,\n name=name + '_block1',\n norm_use=norm_use,\n )\n for i in range(2, blocks + 1):\n x = block1(x, filters, conv_shortcut=False, name=name + '_block' + str(i), norm_use=norm_use)\n\n return x",
"def build_blocks():\n block_1 = GRect(375, 80, x=20, y=330)\n block_1.filled = True\n block_1.color = 'firebrick'\n block_1.fill_color = 'firebrick'\n window.add(block_1)\n block_2 = GRect(375, 80, x=405, y=330)\n block_2.filled = True\n block_2.color = 'steelblue'\n block_2.fill_color = 'steelblue'\n window.add(block_2)\n block_3 = GRect(375, 80, x=20, y=420)\n block_3.filled = True\n block_3.color = 'goldenrod'\n block_3.fill_color = 'goldenrod'\n window.add(block_3)\n block_4 = GRect(375, 80, x=405, y=420)\n block_4.filled = True\n block_4.color = 'forestgreen'\n block_4.fill_color = 'forestgreen'\n window.add(block_4)\n block_5 = GRect(60, 40, x=720, y=120)\n block_5.filled = True\n block_5.color = 'dodgerblue'\n block_5.fill_color = 'dodgerblue'\n window.add(block_5)\n circle_1 = GOval(90, 90, x=20, y=170)\n circle_1.filled = True\n circle_1.color = 'blueviolet'\n circle_1.fill_color = 'blueviolet'\n window.add(circle_1)",
"def stack3(x, filters, blocks, stride1=2, groups=32, name='', norm_use=\"bn\"):\n x = block3(x, filters, stride=stride1, groups=groups, name=name + '_block1', norm_use=norm_use)\n for i in range(2, blocks + 1):\n x = block3(\n x,\n filters,\n groups=groups,\n conv_shortcut=False,\n name=name + '_block' + str(i),\n norm_use=norm_use\n )\n return x",
"def stack2(x, filters, blocks, stride1=2, name='', norm_use=\"bn\"):\n x = block2(x, filters, conv_shortcut=True, name=name + '_block1', norm_use=norm_use)\n for i in range(2, blocks):\n x = block2(x, filters, name=name + '_block' + str(i), norm_use=norm_use)\n x = block2(x, filters, stride=stride1, name=name + '_block' + str(blocks), norm_use=norm_use)\n return x",
"def add_blocks(self, bottom_left, top_right, texture, immediately=False):\n x, y, z = bottom_left\n X, Y, Z = top_right\n assert ((X - x) * (Y - x) * (Z - z)) <= 500000, \"Unable to fill more than 500,000 blocks. Number of blocks: {}\"\\\n .format((X - x) * (Y - x) * (Z - z))\n\n for x_coord in range(x, X, 1):\n for y_coord in range(y, Y, 1):\n for z_coord in range(z, Z, 1):\n self.add_block((x_coord, y_coord, z_coord), texture, immediately=immediately)",
"def basestack(p, i, j, k, l):\n return _RNAstructure_wrap.basestack(p, i, j, k, l)",
"def stack1(x, filters, blocks, stride1=2, name=None):\n x = block1(x, filters, stride=stride1, name=name + '_block1')\n for i in range(2, blocks + 1):\n x = block1(x, filters, conv_shortcut=False, name=name + '_block' + str(i))\n return x",
"def stack(self, *args, **kwargs):\n return self._block(*args, container=\"stack\", **kwargs)",
"def stacking_blocks(context, network):\n\n # init\n if const.INIT not in context:\n init_rig_hierarchy(context, network)\n\n # blocks list, context\n blocks = list()\n blocks.append(network)\n blocks_list(network, blocks)\n blocks = [x for x in blocks if not x.hasAttr(const.ISLEGO)]\n for block in blocks:\n common_name, joint_name, side_name, con_exp, jnt_exp = context[const.NAMING]\n block_name = block.attr(const.BLOCK_NAME).get()\n block_side = side_name[block.attr(const.BLOCK_SIDE).get()]\n block_index = block.attr(const.BLOCK_INDEX).get()\n common_name = common_name.format(name=block_name,\n side=block_side,\n index=block_index,\n description=\"{description}\",\n extension=\"{extension}\")\n\n joint_name = joint_name.format(name=block_name,\n side=block_side,\n index=block_index,\n description=\"{description}\",\n extension=\"{extension}\")\n\n name = lib.get_context_name(block, side_name)\n if name not in context:\n log.log(level=0, msg=\"context {0}\".format(name))\n context[name] = OrderedDict()\n context[name][const.NAMING] = (common_name, joint_name, con_exp, jnt_exp)\n else:\n raise Exception(\"already context {0}\".format(name))\n\n # import blocks\n modules = list()\n for index, block in enumerate(blocks):\n common_name, joint_name, side_name, con_exp, jnt_exp = context[const.NAMING]\n block_type = block.attr(const.BLOCK_TYPE).get()\n\n name = lib.get_context_name(block, side_name)\n log.log(level=0, msg=\"init {0}\".format(name))\n modules.append(importlib.import_module(\"lego.blocks.legobox.{0}\".format(block_type)))\n # importlib.reload(modules[index])\n\n # create objects\n for index, module in enumerate(modules):\n side_name = context[const.NAMING][2]\n name = lib.get_context_name(blocks[index], side_name)\n log.log(level=0, msg=\"Objects {0}\".format(name))\n module.create_objects(context, name, blocks[index])\n\n # create attributes\n for index, module in enumerate(modules):\n side_name = context[const.NAMING][2]\n name = lib.get_context_name(blocks[index], side_name)\n log.log(level=0, msg=\"Attribute {0}\".format(name))\n module.create_attributes(context, name, blocks[index])\n\n # create connections\n for index, module in enumerate(modules):\n side_name = context[const.NAMING][2]\n name = lib.get_context_name(blocks[index], side_name)\n log.log(level=0, msg=\"Connections {0}\".format(name))\n module.create_connections(context, name, blocks[index])\n\n # connect joints\n for index, module in enumerate(modules):\n side_name = context[const.NAMING][2]\n name = lib.get_context_name(blocks[index], side_name)\n log.log(level=0, msg=\"Joint structure {0}\".format(name))\n module.joint_structure(context, name, blocks[index])",
"def create_hard_blocks(self):\n for x in xrange(1, self.map_size[0], 2):\n for y in xrange(1, self.map_size[1], 2):\n self.create_hard_block_at(x, y)",
"def NewBlock(self):\n for i in self.matrix:\n if 2 in i:\n return()\n blockType = self.bag.Choose()\n subtractor = {\"I\" : 4, \"J\" : 3, \"L\" : 3, \"O\" : 2, \"S\" : 3, \"T\" : 3, \"Z\": 3}\n x = random.randint(0, self.width - subtractor.get(blockType))\n coords = []\n if blockType == \"I\":\n coords = [(x + i, 0) for i in range(4)]\n elif blockType == \"J\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x, 1))\n elif blockType == \"L\":\n coords = [(x + i, 0) for i in range(3)]\n coords.append((x + 2, 1))\n elif blockType == \"O\":\n coords = [(x, 0), (x + 1, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"Z\":\n coords = [(x, 0), (x + 1, 0), (x + 1, 1), (x + 2, 1)]\n elif blockType == \"S\":\n coords = [(x + 1, 0), (x + 2, 0), (x, 1), (x + 1, 1)]\n elif blockType == \"T\":\n coords = [(x, 0), (x + 1, 0), (x + 2, 0), (x + 1, 1)]\n self.coords = coords\n return(coords)",
"def create_stack():\n\n return Stack()",
"def get_blocks(index):\r\n #call with -1 to get full blocklist\r\n #the reason this is a function instead of just a list is that originally\r\n #i had plans to support dynamic tilesets, for example if only a certain\r\n #number of each tile were available. in the end this didnt happen though\r\n all_blocks = [\r\n [[0,0,0],[1,1,1],[0,0,0]], #0 - (horizontal passage)\r\n [[0,1,0],[0,1,0],[0,1,0]], #1 | (vertical passage)\r\n \r\n [[0,0,0],[1,1,0],[0,1,0]], #2 >v various L-junctions\r\n [[0,1,0],[1,1,0],[0,0,0]], #3 >^\r\n [[0,0,0],[0,1,1],[0,1,0]], #4 ^>\r\n [[0,1,0],[0,1,1],[0,0,0]], #5 v>\r\n \r\n [[0,0,0],[0,0,0],[0,0,0]], #6 0 empty\r\n [[0,1,0],[1,1,1],[0,1,0]], #7 + cross\r\n \r\n [[0,1,0],[1,1,1],[0,0,0]], #8 _|_ various T-junctions\r\n [[0,0,0],[1,1,1],[0,1,0]], #9 T\r\n [[0,1,0],[1,1,0],[0,1,0]], #10 -|\r\n [[0,0,0],[1,1,1],[0,0,0]]] #11 |-\r\n \r\n# [[0,1,0],[0,1,0],[0,0,0]], #12 #unsued \"dead end\" pieces\r\n# [[0,0,0],[0,1,0],[0,1,0]], #13\r\n# [[0,0,0],[0,1,1],[0,0,0]], #14\r\n# [[0,0,0],[1,1,0],[0,0,0]] ]#15\r\n if index == -1:\r\n return all_blocks\r\n else:\r\n return all_blocks[index]",
"def make_stack(tb, stack=None):\n if stack is None:\n stack = []\n if tb is not None:\n make_stack(tb.tb_next, stack)\n stack.append(tb)\n return stack",
"def __init__(self):\n self.stackIn = []\n self.stackOut = []",
"def __init__(self,\n num_heads=8,\n seq_len=1024,\n block=16,\n different_layout_per_head=False,\n num_sliding_window_blocks=3,\n global_block_indices=[0],\n global_block_end_indices=None):\n super().__init__(num_heads, seq_len, block, different_layout_per_head)\n\n if (self.num_blocks < num_sliding_window_blocks):\n raise ValueError(\n f'Number of sliding window blocks, {num_sliding_window_blocks}, must be smaller than overal number of blocks in a row, {self.num_blocks}!'\n )\n self.num_sliding_window_blocks = num_sliding_window_blocks\n\n if (self.num_blocks < len(global_block_indices)):\n raise ValueError(\n f'Number of global blocks indices, {global_block_indices}, must be smaller than overal number of blocks in a row, {self.num_blocks}!'\n )\n for idx in global_block_indices:\n if idx >= self.num_blocks:\n raise ValueError(\n f'Global block index, {global_block_indices[idx]}, must be smaller than overal number of blocks in a row, {self.num_blocks}!'\n )\n self.global_block_indices = global_block_indices\n\n if (global_block_end_indices is not None):\n if (len(global_block_indices) != len(global_block_end_indices)):\n raise ValueError(\n f'Global block start indices length, {len(global_block_indices)}, must be same as global block end indices length, {len(global_block_end_indices)}!'\n )\n for _, (start_idx, end_idx) in enumerate(zip(global_block_indices, global_block_end_indices)):\n if end_idx > self.num_blocks:\n raise ValueError(\n f'Global block end index, {global_block_end_indices[idx]}, must be smaller (equal) than overal number of blocks in a row, {self.num_blocks}!'\n )\n if start_idx >= end_idx:\n raise ValueError(\n f'Global block start index, {start_idx}, must be smaller than global block end index, {end_idx}!'\n )\n self.global_block_end_indices = global_block_end_indices\n self.make_layout()",
"def Stack(self, name, blocks, output_all_layer_hiddens=False):\n if output_all_layer_hiddens:\n graph_inputs = ['input']\n graph_outputs = []\n graph_modules = []\n layer_input = 'input'\n for idx, block in enumerate(blocks):\n layer_output = 'output_{}'.format(idx)\n graph_modules.append((f'{layer_input}->{layer_output}', block))\n graph_outputs.append(layer_output)\n layer_input = layer_output\n return self._Graph(name, graph_inputs, graph_outputs, *graph_modules)\n else:\n return self._MaybeSplit(name, blocks) or self._Seq(name, *blocks)",
"def _generate_next_blocks(self):\n next_block = choice(LEVELS)\n if self.is_start is False and self.next_region == 0:\n last = self.string_level_blocks[-1][1]\n self.string_level_blocks.append((self.next_region, last))\n self._get_sprites_block(last, self.next_region)\n else:\n if self.next_region != 0:\n self.string_level_blocks.append((self.next_region, next_block))\n self._get_sprites_block(next_block, self.next_region)\n else:\n if len(self.string_level_blocks) > 0:\n last = self.string_level_blocks[-1][1]\n self.string_level_blocks.append((self.next_region, last))\n self._get_sprites_block(last, self.next_region)\n else:\n self.string_level_blocks.append((self.next_region, next_block))\n self._get_sprites_block(next_block, self.next_region)\n self.next_region += 1\n self.next_region %= AMOUNT_REGIONS_TO_DRAW",
"def stack_with_upsampling(x, filters, blocks, up_stride_last=2, groups=32, name=None):\n x = block(x, filters, groups=groups, conv_shortcut=True, name=name + '_block1')\n for i in range(2, blocks):\n x = block(x, filters, groups=groups, conv_shortcut=False, name=name + '_block' + str(i))\n x = block(x, filters, up_stride=up_stride_last, groups=groups, name=name + '_block' + str(blocks))\n return x",
"def __init__(self):\r\n # 初始化两个列表,当作栈来使用\r\n self.stack = []",
"def __init__(self):\n self.stack = []\n self.stackMin = []",
"def matrix_list_to_blocks(focks, frames, orbs, cg, progress = (lambda x: x)):\n\n blocks = couple_blocks(matrix_to_blocks(focks[0], frames[0], orbs), cg)\n slices = [{}]\n for k in blocks.keys():\n slices[0][k] = {}\n for orb in blocks[k]:\n if len(blocks[k][orb]) == 0:\n slices[0][k][orb] = slice(0, 0)\n continue\n L0 = list(blocks[k][orb].keys())[0]\n slices[0][k][orb] = slice(0, len(blocks[k][orb][L0]))\n for ifr in progress(range(1,len(frames))):\n fc = couple_blocks(matrix_to_blocks(focks[ifr], frames[ifr], orbs), cg)\n slices.append({})\n for k in fc.keys():\n slices[-1][k] = {}\n for orb in fc[k]:\n if len(fc[k][orb]) == 0:\n slices[-1][k][orb] = slice(0, 0)\n continue\n L0 = list(fc[k][orb].keys())[0]\n if not orb in blocks[k]:\n # extend the blocks if more orbital combinations appear\n blocks[k][orb] = fc[k][orb]\n slices[-1][k][orb] = slice(0, len(blocks[k][orb][L0]))\n else:\n slices[-1][k][orb] = slice(len(blocks[k][orb][L0]),\n len(blocks[k][orb][L0])+len(fc[k][orb][L0]) )\n for L in fc[k][orb]:\n blocks[k][orb][L] = np.vstack([blocks[k][orb][L], fc[k][orb][L] ] )\n return blocks, slices",
"def __init__(self):\r\n self.stack = []\r\n self.minStack = []",
"def form_blocks(instrs):\n\n # Start with an empty block.\n cur_block = []\n\n for instr in instrs:\n if 'op' in instr: # It's an instruction.\n # Add the instruction to the currently-being-formed block.\n cur_block.append(instr)\n\n # If this is a terminator (branching instruction), it's the\n # last instruction in the block. Finish this block and\n # start a new one.\n if instr['op'] in TERMINATORS:\n yield cur_block\n cur_block = []\n\n else: # It's a label.\n # End the block here (if it contains anything).\n if cur_block:\n yield cur_block\n\n # Start a new block with the label.\n cur_block = [instr]\n\n # Produce the final block, if any.\n if cur_block:\n yield cur_block"
] | [
"0.6453076",
"0.6413551",
"0.6280478",
"0.6205604",
"0.6200286",
"0.61282814",
"0.6105402",
"0.6089504",
"0.6055599",
"0.59682417",
"0.5961837",
"0.5952528",
"0.5947952",
"0.59477377",
"0.59387773",
"0.5916841",
"0.5901226",
"0.5894852",
"0.5885826",
"0.5881532",
"0.58651316",
"0.5851886",
"0.5827696",
"0.5818939",
"0.58172613",
"0.581197",
"0.58067644",
"0.5796957",
"0.5786144",
"0.5778204"
] | 0.6464052 | 0 |
mnasneta1 w.t. 3x3MBconv3 block only | def mnasneta1_3x3mbconv3(pretrained=False, progress=False, **kwargs):
return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],
kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[False, False, False, False, False, False],
dropout=0, pretrained=pretrained, progress=progress, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mnasneta1_3x3mbconv3se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def __init__(self, num_in, num_out, g=1, stride=1, d=(1,1),norm=None):\r\n super(MFunit, self).__init__()\r\n num_mid = num_in if num_in <= num_out else num_out\r\n self.conv1x1x1_in1 = Conv3d_Block(num_in,num_in//4,kernel_size=1,stride=1,norm=norm)\r\n self.conv1x1x1_in2 = Conv3d_Block(num_in//4,num_mid,kernel_size=1,stride=1,norm=norm)\r\n self.conv3x3x3_m1 = DilatedConv3DBlock(num_mid,num_out,kernel_size=(3,3,3),stride=stride,g=g,d=(d[0],d[0],d[0]),norm=norm) # dilated\r\n self.conv3x3x3_m2 = DilatedConv3DBlock(num_out,num_out,kernel_size=(3,3,1),stride=1,g=g,d=(d[1],d[1],1),norm=norm)\r\n # self.conv3x3x3_m2 = DilatedConv3DBlock(num_out,num_out,kernel_size=(1,3,3),stride=1,g=g,d=(1,d[1],d[1]),norm=norm)\r\n\r\n # skip connection\r\n if num_in != num_out or stride != 1:\r\n if stride == 1:\r\n self.conv1x1x1_shortcut = Conv3d_Block(num_in, num_out, kernel_size=1, stride=1, padding=0,norm=norm)\r\n if stride == 2:\r\n # if MF block with stride=2, 2x2x2\r\n self.conv2x2x2_shortcut = Conv3d_Block(num_in, num_out, kernel_size=2, stride=2,padding=0, norm=norm) # params\r",
"def mnasneta1_5x5mbconv3(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def inception_block_1a(X):\n\tX_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name ='inception_3a_3x3_conv1')(X)\n\tX_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name = 'inception_3a_3x3_bn1')(X_3x3)\n\tX_3x3 = Activation('relu')(X_3x3)\n\tX_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)\n\tX_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3a_3x3_conv2')(X_3x3)\n\tX_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn2')(X_3x3)\n\tX_3x3 = Activation('relu')(X_3x3)\n\tX_5x5 = Conv2D(16, (1, 1), data_format='channels_first', name='inception_3a_5x5_conv1')(X)\n\tX_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn1')(X_5x5)\n\tX_5x5 = Activation('relu')(X_5x5)\n\tX_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)\n\tX_5x5 = Conv2D(32, (5, 5), data_format='channels_first', name='inception_3a_5x5_conv2')(X_5x5)\n\tX_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn2')(X_5x5)\n\tX_5x5 = Activation('relu')(X_5x5)\n\tX_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)\n\tX_pool = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3a_pool_conv')(X_pool)\n\tX_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_pool_bn')(X_pool)\n\tX_pool = Activation('relu')(X_pool)\n\tX_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool)\n\tX_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3a_1x1_conv')(X)\n\tX_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_1x1_bn')(X_1x1)\n\tX_1x1 = Activation('relu')(X_1x1)\n\t# CONCAT\n\tinception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)\n\treturn inception",
"def mobilenetv2 (inputs, k, alpha = 1.0, train_bn = False):\n\n x = conv_block(inputs, 32, alpha, (3, 3), strides=(2, 2), block_id=0, train_bn=train_bn) # Input Res: 1\n\n x = inverted_residual_block(x, 16, (3, 3), t=1, strides=1, n=1, alpha=1.0, block_id=1, train_bn=train_bn)\t# Input Res: 1/2\n x = inverted_residual_block(x, 24, (3, 3), t=6, strides=2, n=2, alpha=1.0, block_id=2, train_bn=train_bn)\t# Input Res: 1/2\n x = inverted_residual_block(x, 32, (3, 3), t=6, strides=2, n=3, alpha=1.0, block_id=4, train_bn=train_bn)\t# Input Res: 1/4\n x = inverted_residual_block(x, 64, (3, 3), t=6, strides=2, n=4, alpha=1.0, block_id=7, train_bn=train_bn)\t# Input Res: 1/8\n x = inverted_residual_block(x, 96, (3, 3), t=6, strides=1, n=3, alpha=1.0, block_id=11, train_bn=train_bn)\t# Input Res: 1/8\n x = inverted_residual_block(x, 160, (3, 3), t=6, strides=2, n=3, alpha=1.0, block_id=14, train_bn=train_bn)\t# Input Res: 1/16\n x = inverted_residual_block(x, 320, (3, 3), t=6, strides=1, n=1, alpha=1.0, block_id=17, train_bn=train_bn)\t# Input Res: 1/32\n\n x = conv_block(x, 1280, alpha, (1, 1), strides=(1, 1), block_id=18, train_bn=train_bn) # Input Res: 1/32\n\n x = KL.GlobalAveragePooling2D()(x)\n x = KL.Reshape((1, 1, 1280))(x)\n x = KL.Dropout(0.3, name='Dropout')(x)\n x = KL.Conv2D(k, (1, 1), padding='same')(x)\n\n x = KL.Activation('softmax', name='softmax')(x)\n output = KL.Reshape((k,))(x)\n\n model = KM.Model(inputs, output)\n plot_model(model, to_file='MobileNetv2.png', show_shapes=True)\n\n return model",
"def inception_block_1a(X):\n\n X_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name='inception_3a_3x3_conv1')(X)\n X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn1')(X_3x3)\n X_3x3 = Activation('relu')(X_3x3)\n X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)\n X_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3a_3x3_conv2')(X_3x3)\n X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn2')(X_3x3)\n X_3x3 = Activation('relu')(X_3x3)\n\n X_5x5 = Conv2D(16, (1, 1), data_format='channels_first', name='inception_3a_5x5_conv1')(X)\n X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn1')(X_5x5)\n X_5x5 = Activation('relu')(X_5x5)\n X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)\n X_5x5 = Conv2D(32, (5, 5), data_format='channels_first', name='inception_3a_5x5_conv2')(X_5x5)\n X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn2')(X_5x5)\n X_5x5 = Activation('relu')(X_5x5)\n\n X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)\n X_pool = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3a_pool_conv')(X_pool)\n X_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_pool_bn')(X_pool)\n X_pool = Activation('relu')(X_pool)\n X_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool)\n\n X_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3a_1x1_conv')(X)\n X_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_1x1_bn')(X_1x1)\n X_1x1 = Activation('relu')(X_1x1)\n\n # CONCAT\n inception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)\n\n return inception",
"def mnasneta1_3x3mbconv6(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def mnasneta1_5x5mbconv3se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def _make_conv_layers_bn_padding(self): ## 20 Convs, used for pretrained by IMAGE Net 1000 class\n conv = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=3, bias=False), # padding=3 so, output is 224.\n nn.BatchNorm2d(64),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n nn.Conv2d(64, 192, 3, padding=1, bias=False),\n nn.BatchNorm2d(192),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(192, 128, 1, bias=False), ## kernel size = 1 이므로 padding = 0(defalut)\n nn.BatchNorm2d(128),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(128, 256, 3, padding=1, bias=False), \n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1, bias=False), \n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 512, 1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(1024, 512, 1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(1024, 512, 1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True)\n )\n return conv",
"def mnasneta1_3x3mbconv6se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def Unet4(shape, nb_filters=32, exp=1, kernel_size=3, initialization=\"glorot_uniform\", activation=\"relu\", sigma_noise=0, output_channels=1, drop=0.0, regularization=None):\n \n \n input_layer = Input(shape=shape)\n\n conv1 = ConvBlock(input_layer, nb_filters=nb_filters, kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n if drop > 0.0: pool1 = Dropout(drop)(pool1)\n\n conv2 = ConvBlock(pool1, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n if drop > 0.0: pool2 = Dropout(drop)(pool2)\n\n conv3 = ConvBlock(pool2, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n if drop > 0.0: pool3 = Dropout(drop)(pool3)\n\n conv4 = ConvBlock(pool3, nb_filters=nb_filters * 2 **(3 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n if drop > 0.0: pool4 = Dropout(drop)(pool4)\n\n deconv5 = DeconvBlock(conv4, residual=conv3, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv5 = Dropout(drop)(deconv5)\n\n deconv6 = DeconvBlock(deconv5, residual=conv2, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv6 = Dropout(drop)(deconv6)\n\n deconv7 = DeconvBlock(deconv6, residual=conv1, nb_filters=nb_filters, kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv7 = Dropout(drop)(deconv7)\n\n if sigma_noise > 0:\n deconv7 = GaussianNoise(sigma_noise)(deconv7)\n\n output_layer = Conv2D(filters=output_channels, kernel_size=(1, 1))(deconv7)\n output_layer = BatchNormalization()(output_layer)\n output_layer = Activation('softmax')(output_layer)\n\n model = Model(inputs=input_layer, outputs=output_layer, name='Unet')\n return model",
"def __init__(self):\n super(Encoder3, self).__init__()\n self.lblocks = nn.ModuleList(\n [\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n ]\n )\n\n self.blocks = nn.ModuleList(\n [\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n ]\n )",
"def fun_n_to_one_small(self, block_index, block_dim, nc1_size):\n nc1 = self.batch_size*self.c1_size\n in_size_w_num = _ceil_div(self.in_size_w, 4)\n with self.tik_instance.if_scope(block_index != block_dim - 1):\n with self.tik_instance.for_range(0, nc1_size) as nc1_index:\n ub_input = self.tik_instance.Tensor(\n \"float32\", (self.in_size_h, self.in_size_w,\n self.c_block_size),\n name=\"ub_input\", scope=tik.scope_ubuf)\n self.tik_instance.data_move(\n ub_input[0], self.grads_gm[(block_index*nc1_size +\n nc1_index) * self.in_size_h *\n self.in_size_w*16],\n 0, 1, self.in_size_h*self.in_size_w*2, 0, 0)\n if in_size_w_num > 1:\n with self.tik_instance.for_range(0, in_size_w_num) \\\n as w_in_index:\n with self.tik_instance.if_scope(\n w_in_index != in_size_w_num - 1):\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n MASK, ub_input[w_in_index*64],\n ub_input[w_in_index*64 + self.in_size_w*16],\n ub_input[w_in_index*64], self.in_size_h-1,\n 1, 1, 1, 0, self.in_size_w*2, 0)\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64], 3,\n 1, 1, 1, 0, 2, 0)\n with self.tik_instance.else_scope():\n if self.in_size_h != 1:\n self.tik_instance.vadd((self.in_size_w - (\n in_size_w_num-1)*4)*16,\n ub_input[w_in_index*64],\n ub_input[w_in_index*64 +\n self.in_size_w *\n 16],\n ub_input[w_in_index*64],\n self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w *\n 2, 0)\n if self.in_size_w-(in_size_w_num-1)*4 > 1:\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64],\n self.in_size_w-(in_size_w_num-1)*4-1,\n 1, 1, 1, 0, 2, 0)\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[64], ub_input[0],\n in_size_w_num-1, 1, 1, 1, 0, 8, 0)\n else:\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n self.in_size_w*16, ub_input[0],\n ub_input[self.in_size_w*16],\n ub_input[0], self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w*2, 0)\n if self.in_size_w != 1:\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[16],\n ub_input[0], self.in_size_w-1,\n 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size+nc1_index)*16],\n ub_input[0], 0, 1, 2, 0, 0)\n with self.tik_instance.else_scope():\n with self.tik_instance.for_range(\n 0, nc1 - (block_dim - 1)*nc1_size) as nc1_index:\n ub_input = self.tik_instance.Tensor(\n \"float32\", (self.in_size_h, self.in_size_w,\n self.c_block_size),\n name=\"ub_output\", scope=tik.scope_ubuf)\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16],\n 0, 1, self.in_size_h*self.in_size_w*2, 0, 0)\n if in_size_w_num > 1:\n with self.tik_instance.for_range(0, in_size_w_num) \\\n as w_in_index:\n with self.tik_instance.if_scope(\n w_in_index != in_size_w_num - 1):\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n MASK, ub_input[w_in_index*64],\n ub_input[w_in_index*64 + self.in_size_w*16],\n ub_input[w_in_index*64], self.in_size_h-1,\n 1, 1, 1, 0, self.in_size_w*2, 0)\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64], 3, 1, 1, 1, 0, 2, 0)\n with self.tik_instance.else_scope():\n if self.in_size_h != 1:\n self.tik_instance.vadd((self.in_size_w-(\n in_size_w_num-1)*4)*16,\n ub_input[w_in_index*64],\n ub_input[w_in_index*64 +\n self.in_size_w *\n 16],\n ub_input[w_in_index*64],\n self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w *\n 2, 0)\n if self.in_size_w-(in_size_w_num-1)*4 > 1:\n 
self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64],\n self.in_size_w-(in_size_w_num-1)*4-1,\n 1, 1, 1, 0, 2, 0)\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[64], ub_input[0],\n in_size_w_num-1, 1, 1, 1, 0, 8, 0)\n else:\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n self.in_size_w*16, ub_input[0],\n ub_input[self.in_size_w*16],\n ub_input[0], self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w*2, 0)\n if self.in_size_w != 1:\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[16], ub_input[0],\n self.in_size_w-1, 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size+nc1_index)*16],\n ub_input[0], 0, 1, 2, 0, 0)",
"def block3(\n x,\n filters,\n kernel_size=3,\n stride=1,\n groups=32,\n conv_shortcut=True,\n name='',\n norm_use=\"bn\",\n):\n if conv_shortcut is True:\n shortcut = layers.Conv2D(\n (64 // groups) * filters,\n 1,\n strides=stride,\n use_bias=False,\n name=name + '_0_conv',\n )(x)\n shortcut = normalize_layer(shortcut, norm_use=norm_use, name=name + '_0_')\n else:\n shortcut = x\n\n x = layers.Conv2D(\n filters,\n 1,\n use_bias=False,\n name=name + '_1_conv',\n kernel_initializer='he_normal',\n )(x)\n x = normalize_layer(x, norm_use=norm_use, name=name + '_1_')\n x = layers.Activation('relu', name=name + '_1_relu')(x)\n\n c = filters // groups\n x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)\n x = layers.DepthwiseConv2D(\n kernel_size,\n strides=stride,\n depth_multiplier=c,\n use_bias=False,\n name=name + '_2_conv',\n kernel_initializer='he_normal',\n )(x)\n x_shape = backend.int_shape(x)[1:-1]\n x = layers.Reshape(x_shape + (groups, c, c))(x)\n output_shape = x_shape + (groups, c) if backend.backend() == 'theano' else None\n x = layers.Lambda(\n lambda x: sum([x[:, :, :, :, i] for i in range(c)]),\n output_shape=output_shape,\n name=name + '_2_reduce',\n )(x)\n x = layers.Reshape(x_shape + (filters, ))(x)\n x = normalize_layer(x, norm_use=norm_use, name=name + '_2_')\n x = layers.Activation('relu', name=name + '_2_relu')(x)\n\n x = layers.Conv2D((64 // groups) * filters, 1, kernel_initializer='he_normal',\n use_bias=False, name=name + '_3_conv')(x)\n x = normalize_layer(x, norm_use=norm_use, name=name + '_3_')\n\n x = layers.Add(name=name + '_add')([shortcut, x])\n x = layers.Activation('relu', name=name + '_out')(x)\n return x",
"def __init__(self):\n super(Decoder_1m, self).__init__()\n self.lconvtwos = nn.ModuleList(\n [\n nn.Sequential(\n nn.Dropout(p=0.1),\n nn.Conv2d(128, 32, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, 
kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n ]\n )\n\n self.convtwos = nn.ModuleList(\n [\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), 
padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n ]\n )\n self.final = nn.Sequential(\n nn.Conv2d(64, 5, kernel_size=(1, 1), padding=0),\n nn.BatchNorm2d(5),\n nn.ReLU(inplace=True),\n nn.Conv2d(5, 1, kernel_size=(1, 1), padding=0),\n )",
"def TCN_V3(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 128\n\n config = [ \n [(1,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(2,8,256)],\n [(1,8,256)],\n [(1,8,256)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n #dr = Dropout(dropout)(conv)\n\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model",
"def __init__(self, dropout_rate=0.0, in_channels=3):\n\n super(MaskNet, self).__init__()\n\n self.prep_block_1 = nn.Sequential(\n nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(32),\n nn.Dropout(dropout_rate),\n )\n self.prep_block_2 = nn.Sequential(\n nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(32),\n nn.Dropout(dropout_rate),\n )\n\n self.convblock1 = nn.Sequential(\n nn.Conv2d(in_channels=64, out_channels=32, kernel_size=1, padding=0),\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.Dropout(dropout_rate),\n )\n\n self.convblock2 = nn.Sequential(\n nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.Dropout(dropout_rate),\n )\n\n self.convblock3 = nn.Sequential(\n nn.Conv2d(in_channels=64, out_channels=1, kernel_size=1, padding=0),\n )",
"def __init__(self, dropout_rate, num_classes, include_top, layer):\r\n super(VGG16_Shuffle, self).__init__()\r\n print(\"CIFAR VGG16_Shuffle is used\")\r\n self.dropout_rate = dropout_rate\r\n self.num_classes = num_classes\r\n self.include_top = include_top\r\n self.layer = layer\r\n\r\n # Define the building blocks\r\n if layer == 11:\r\n self.conv11 = CONV_3x3shuffle(3, 64, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv11 = CONV_3x3(3, 64, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 12:\r\n self.conv12 = nn.Sequential(CONV_3x3shuffle(64, 64, kernelsize=3, stride=1, padding=1),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv12 = nn.Sequential(CONV_3x3(64, 64, kernelsize=3, stride=1, padding=1, bias=False),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n # self.conv12 = CONV_3x3(64, 64, kernelsize=3, stride=2, padding=1, bias=False)\r\n\r\n if layer == 21:\r\n self.conv21 = CONV_3x3shuffle(64, 128, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv21 = CONV_3x3(64, 128, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 22:\r\n self.conv22 = nn.Sequential(CONV_3x3shuffle(128, 128, kernelsize=3, stride=1, padding=1),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv22 = nn.Sequential(CONV_3x3(128, 128, kernelsize=3, stride=1, padding=1, bias=False),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n # self.conv22 = CONV_3x3(128, 128, kernelsize=3, stride=2, padding=1, bias=False)\r\n\r\n if layer == 31:\r\n self.conv31 = CONV_3x3shuffle(128, 256, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv31 = CONV_3x3(128, 256, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 32:\r\n self.conv32 = CONV_3x3shuffle(256, 256, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv32 = CONV_3x3(256, 256, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 33:\r\n self.conv33 = nn.Sequential(CONV_3x3shuffle(256, 256, kernelsize=3, stride=1, padding=1),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv33 = nn.Sequential(CONV_3x3(256, 256, kernelsize=3, stride=1, padding=1, bias=False),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n # self.conv33 = CONV_3x3(256, 256, kernelsize=3, stride=2, padding=1, bias=False)\r\n\r\n if layer == 41:\r\n self.conv41 = CONV_3x3shuffle(256, 512, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv41 = CONV_3x3(256, 512, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 42:\r\n self.conv42 = CONV_3x3shuffle(512, 512, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv42 = CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 43:\r\n self.conv43 = nn.Sequential(CONV_3x3shuffle(512, 512, kernelsize=3, stride=1, padding=1),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv43 = nn.Sequential(CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=False),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n # self.conv43 = CONV_3x3(512, 512, kernelsize=3, stride=2, padding=1, bias=False)\r\n\r\n if layer == 51:\r\n self.conv51 = CONV_3x3shuffle(512, 512, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv51 = CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 52:\r\n self.conv52 = CONV_3x3shuffle(512, 512, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv52 = CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 53:\r\n self.conv53 = nn.Sequential(CONV_3x3shuffle(512, 
512, kernelsize=3, stride=1, padding=1),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv53 = nn.Sequential(CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=False),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n self.avgpool = nn.AdaptiveAvgPool2d(1)\r\n self.fc = nn.Sequential(nn.Linear(512, 4096),\r\n nn.ReLU(True),\r\n nn.Linear(4096, 4096),\r\n nn.ReLU(True),\r\n nn.Linear(4096, num_classes))\r\n\r\n # Initialize the weights\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\r\n elif isinstance(m, nn.BatchNorm2d):\r\n # raise Exception('You are using a model without BN!!!')\r\n nn.init.constant_(m.weight, 1)\r\n nn.init.constant_(m.bias, 0)",
"def __init__(self, ):\n super().__init__()\n channels = 3\n\n # Initial convolution block\n out_features = 64\n # encoder\n self.input = nn.Sequential(\n nn.ReflectionPad2d(channels),\n nn.Conv2d(3, out_features, (7, 7)),\n nn.InstanceNorm2d(out_features),\n nn.MaxPool2d(2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(out_features, out_features * 2, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n nn.MaxPool2d(2),\n nn.Conv2d(out_features * 2, out_features * 4, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n )\n\n self.in0 = nn.InstanceNorm2d(256)\n self.block0 = blocks()\n self.block1 = blocks()\n self.block2 = blocks()\n self.block3 = blocks()\n self.block4 = blocks()\n self.block5 = blocks()\n self.block6 = blocks()\n self.block7 = blocks()\n\n self.out = nn.Sequential(\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 4, out_features * 2, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 2, out_features, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.ReflectionPad2d(channels),\n nn.Conv2d(out_features, channels, 7),\n nn.Tanh(),\n )",
"def vecvari1(array,W,B=None,sqrt=False,BB=False,BS=False,verbose=False,sizz=1,\r\n KCD=False,mulb=False,mul2=False,v3=0,**kwargs):\r\n \r\n arrs=array.shape\r\n #array=np.expand_dims(array,len(array.shape)//2)\r\n ashp=W.shape\r\n dstp=arrs[0]-1 if not((arrs[0]-1)==0) else 1\r\n if verbose:\r\n print(\"VECVARI1:: B? {},SQRT {}, BB {}, BS {}, SIZZ {}, KCD {}, MULB {}, MUL2 {}\".format(\r\n not(B is None),bool(sqrt),bool(BB),bool(BS),sizz,bool(KCD),bool(mulb),bool(mul2)))\r\n print('arrayshape',arrs)\r\n if verbose==2:\r\n print('Wsample',W[:,:,-1,-1])\r\n else:\r\n print('Wsample',W[:,:,-1,-1])\r\n if not(B is None):\r\n print(\"Bsamp\",B)\r\n print('wshape',ashp)\r\n if B is None:\r\n B=np.zeros((1,1,1,1),dtype=np.float32)#channel\r\n bt=len(B.shape)==2\r\n xi=(-2,-1)#xi=(-1,-2)\r\n x2=(-3,-2,-1)\r\n if len(ashp)==5 :#not all data and all weights == 3d data\r\n xi=(-3,-2,-1)\r\n x2=(-4,-3,-2,-1)\r\n if v3:\r\n if mulb:#probably a bad idea\r\n mul=array+B\r\n else:\r\n mul=array\r\n else:\r\n if mulb:#probably a bad idea\r\n B=np.reshape(B,(*B.shape,*[1 for _ in range(len(ashp)-len(B.shape))]))\r\n mul=(array*W)+B\r\n else:\r\n mul=array*W\r\n size=np.sum(W,axis=xi,keepdims=True)#shape=(outputs, channel)\r\n\r\n if BB :\r\n B=np.reshape(B,(*B.shape,*[1 for _ in range(len(ashp)-len(B.shape))]))\r\n if verbose:\r\n if verbose==2:\r\n print('mulsamp',mul[:,-1,-1,::dstp],'arrsamp',array[-1,-1,:])\r\n else:\r\n print('mulsamp',mul[-1,-1,-1],'arrsamp',array[-1,-1,-1])\r\n print('sizsamp',size)\r\n print('bbb',B.shape)\r\n print(\"size\",size.shape)\r\n if sizz==1:#not a good idea\r\n mean=np.sum((mul),axis=xi,keepdims=True)/size\r\n else:\r\n mean=np.sum((mul),axis=xi,keepdims=True)/np.broadcast_to([ashp[-2]*ashp[-1]],(ashp[1],1,1))\r\n if verbose:\r\n if verbose==2:\r\n print(\"meanshape\",mean.shape)\r\n print(\"meansamp\",mean[:,:,:,::dstp,-1,-1,-1])\r\n else:\r\n print(\"meansamp\",mean[-1,:,:,-1,-1,-1,-1])\r\n print(\"etst\",mean.shape)\r\n if verbose==2:\r\n print(\"ameanshp\",(mul-mean).shape)\r\n print(\"amean\",(mul-mean)[:,:,:,::dstp,-1,-1])\r\n else:\r\n print(\"amean\",(mul-mean)[-1,-1,-1])\r\n if mul2:\r\n if mulb:#probably a bad idea\r\n mul=((array-mean)*W)+B\r\n else:\r\n mul=((array-mean)*W)\r\n i=(np.square(mul))/size\r\n else:\r\n if v3==1:\r\n if BB:\r\n i=(np.square(((array-mean)*W)+B)/size)#B could be included\r\n else:\r\n i=(np.square(((array-mean)*W))/size)#B could be included\r\n if v3==2:#not a good idea\r\n if BB:\r\n i=((np.square(array-mean)*W)+B)/size#B could be included\r\n else:\r\n i=((np.square(array-mean)*W))/size#B could be included\r\n if v3==3:\r\n if BB:\r\n i=((np.square(array-mean)/size)*W)+B#B could be included\r\n else:\r\n i=((np.square(array-mean)/size)*W)#B could be included\r\n else:\r\n if BB:\r\n i=(np.square((mul)-mean)+B)/size\r\n else:\r\n i=(np.square((mul)-mean))/size\r\n if KCD:\r\n out=np.sum(i,axis=xi)\r\n else:\r\n out=np.rollaxis(np.sum(i,axis=x2),-1,1)\r\n if verbose:\r\n print(i.shape)\r\n if verbose==2:\r\n print('ishp',i.shape)\r\n print('isample',i[:,-1,-1,::dstp],i.dtype)\r\n else:\r\n print('isample',i[-1,-1,-1],i.dtype)\r\n if sqrt:\r\n out=np.sqrt(out)\r\n if verbose:\r\n if verbose==2:\r\n print('oushp',out.shape)\r\n print(\"outsample\",out[:,::dstp,-1,-1])\r\n else:\r\n print(\"outsample\",out[-1,-1,-1])\r\n print(\"out\",out.shape,(arrs[0],ashp[0],arrs[1],arrs[2]))\r\n if KCD:\r\n out=np.reshape(out,(arrs[0],ashp[0]*arrs[-3],arrs[1],arrs[2]))\r\n else:\r\n assert out.shape==(arrs[0],ashp[0],arrs[1],arrs[2])\r\n if 
not(BB)and BS:\r\n B=np.reshape(B,(*B.shape,*[1 for _ in range(len(ashp)-len(B.shape))]))\r\n return(out+B[:,0])\r\n else:\r\n return(out)",
"def c3d(self):\n model = Sequential()\n # 1st layer group\n model.add(Conv3D(64, 3, 3, 3, activation='relu',\n border_mode='same', name='conv1',\n subsample=(1, 1, 1),\n input_shape=self.input_shape))\n model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),\n border_mode='valid', name='pool1'))\n # 2nd layer group\n model.add(Conv3D(128, 3, 3, 3, activation='relu',\n border_mode='same', name='conv2',\n subsample=(1, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='valid', name='pool2'))\n # 3rd layer group\n model.add(Conv3D(256, 3, 3, 3, activation='relu',\n border_mode='same', name='conv3a',\n subsample=(1, 1, 1)))\n model.add(Conv3D(256, 3, 3, 3, activation='relu',\n border_mode='same', name='conv3b',\n subsample=(1, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='valid', name='pool3'))\n # 4th layer group\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv4a',\n subsample=(1, 1, 1)))\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv4b',\n subsample=(1, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='valid', name='pool4'))\n\n # 5th layer group\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv5a',\n subsample=(1, 1, 1)))\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv5b',\n subsample=(1, 1, 1)))\n model.add(ZeroPadding3D(padding=(0, 1, 1)))\n # model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n # border_mode='valid', name='pool5', dim_ordering=\"tf\"))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='same', name='pool5', dim_ordering=\"tf\"))\n model.add(Flatten())\n\n # FC layers group\n model.add(Dense(4096, activation='relu', name='fc6'))\n model.add(Dropout(0.5))\n model.add(Dense(4096, activation='relu', name='fc7'))\n model.add(Dropout(0.5))\n model.add(Dense(self.nb_classes, activation='softmax'))\n for layer in model.layers:\n print(layer.output_shape)\n return model",
"def fun_n_to_one_big(self, block_index, block_dim, nc1_size):\n ub_output_tmp = self.tik_instance.Tensor(\n \"float32\", (4, self.c_block_size), name=\"ub_output_tmp\",\n scope=tik.scope_ubuf)\n ub_output = self.tik_instance.Tensor(\n \"float32\", (1, self.c_block_size), name=\"ub_output\",\n scope=tik.scope_ubuf)\n ub_input = self.tik_instance.Tensor(\n \"float32\", (240*4, self.c_block_size), name=\"ub_input\",\n scope=tik.scope_ubuf)\n input_num = _ceil_div(self.in_size_h*self.in_size_w*16, 240*64)\n if input_num > 1:\n thread_num = 2\n else:\n thread_num = 1\n\n nc1 = self.batch_size*self.c1_size\n with self.tik_instance.if_scope(block_index != block_dim - 1):\n with self.tik_instance.for_range(0, nc1_size) as nc1_index:\n self.tik_instance.vector_dup(MASK, ub_output_tmp, 0.0, 1, 1, 8)\n self.tik_instance.vector_dup(16, ub_output, 0.0, 1, 1, 8)\n with self.tik_instance.for_range(\n 0, input_num, thread_num=thread_num) as input_index:\n with self.tik_instance.if_scope(\n input_index != input_num - 1):\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, 8*240, 0, 0)\n self.tik_instance.vadd(MASK, ub_output_tmp[0],\n ub_input[0], ub_output_tmp[0],\n 240, 1, 1, 1, 0, 8, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, (self.in_size_h*self.in_size_w -\n input_index*4*240)*2, 0, 0)\n with self.tik_instance.for_range(\n 0, self.in_size_h*self.in_size_w -\n input_index*4*240) as tmp_index:\n self.tik_instance.vadd(16, ub_output[0],\n ub_input[tmp_index*16],\n ub_output[0], 1, 1,\n 1, 1, 0, 2, 0)\n self.tik_instance.vadd(16, ub_output[0],\n ub_output_tmp[0],\n ub_output[0],\n 4, 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size + nc1_index)*16],\n ub_output[0], 0, 1, 2, 0, 0)\n with self.tik_instance.else_scope():\n with self.tik_instance.for_range(0, nc1 - (block_dim - 1)*nc1_size)\\\n as nc1_index:\n self.tik_instance.vector_dup(MASK, ub_output_tmp, 0.0, 1, 1, 8)\n self.tik_instance.vector_dup(16, ub_output, 0.0, 1, 1, 8)\n with self.tik_instance.for_range(\n 0, input_num, thread_num=thread_num) as input_index:\n with self.tik_instance.if_scope(\n input_index != input_num - 1):\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, 8*240, 0, 0)\n self.tik_instance.vadd(MASK, ub_output_tmp[0],\n ub_input[0], ub_output_tmp[0],\n 240, 1, 1, 1, 0, 8, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, (self.in_size_h*self.in_size_w -\n input_index*4*240)*2, 0, 0)\n with self.tik_instance.for_range(\n 0, self.in_size_h*self.in_size_w -\n input_index*4*240) as tmp_index:\n self.tik_instance.vadd(16, ub_output[0],\n ub_input[tmp_index*16],\n ub_output[0], 1, 1,\n 1, 1, 0, 8, 0)\n self.tik_instance.vadd(16, ub_output[0],\n ub_output_tmp[0],\n ub_output[0],\n 4, 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size + nc1_index)*16],\n ub_output[0], 0, 1, 2, 0, 0)",
"def __init__(self):\n super(DLStudio.ExperimentsWithCIFAR.Net2, self).__init__()\n self.relu = nn.ReLU()\n strides = []\n patch_size = 2\n ## conv1:\n out_ch, ker_size, conv_stride, pool_stride = 128,5,1,2\n self.conv1 = nn.Conv2d(3, out_ch, (ker_size,ker_size), padding=(ker_size-1)//2) \n self.pool1 = nn.MaxPool2d(patch_size, pool_stride) \n strides += (conv_stride, pool_stride)\n ## conv2:\n in_ch = out_ch\n out_ch, ker_size, conv_stride, pool_stride = 128,3,1,2\n self.conv2 = nn.Conv2d(in_ch, out_ch, ker_size, padding=(ker_size-1)//2)\n self.pool2 = nn.MaxPool2d(patch_size, pool_stride) \n strides += (conv_stride, pool_stride)\n ## conv3: \n ## meant for repeated invocation, must have same in_ch, out_ch and strides of 1\n in_ch = out_ch\n out_ch, ker_size, conv_stride, pool_stride = in_ch,2,1,1\n self.conv3 = nn.Conv2d(in_ch, out_ch, ker_size, padding=1)\n self.pool3 = nn.MaxPool2d(patch_size, pool_stride) \n# strides += (conv_stride, pool_stride)\n ## figure out the number of nodes needed for entry into fc:\n in_size_for_fc = out_ch * (32 // np.prod(strides)) ** 2 ## (A)\n self.in_size_for_fc = in_size_for_fc\n self.fc1 = nn.Linear(in_size_for_fc, 150)\n self.fc2 = nn.Linear(150, 100)\n self.fc3 = nn.Linear(100, 10)",
"def train(trial_num, image_num, filter_num, filter_size, input_size, channel_num, pooling_rate, left_upper_padding, right_lower_padding):\n\n input_batch_num = 1\n batch_num = 2\n\n init_filters = np.array(np.random.normal(size=filter_num * channel_num *\n filter_size*filter_size), dtype=\"float32\")\n #init_filters = np.array([1.0] * filter_num * channel_num * filter_size * filter_size, dtype=\"float32\")\n init_filters = 0.01 * init_filters.reshape(filter_num, channel_num*filter_size*filter_size)\n\n init_hbias = np.array([-0.1] * filter_num, dtype=\"float32\").reshape(filter_num, 1)\n\n init_vbias = np.array([0.0] * channel_num, dtype=\"float32\").reshape(channel_num, 1)\n\n libnvcrbm = __import__(\"nvcrbm\")\n cur_filters = libnvcrbm.init(filter_num, filter_size, \n input_batch_num, input_size, channel_num,\n pooling_rate, left_upper_padding, right_lower_padding,\n init_filters, init_hbias, init_vbias)\n\n imgs = cPickle.load(open(\"../data/kyoto_large_train.pkl\", \"r\"))\n img_size = imgs[0].shape[0]\n\n for trial_idx in xrange(trial_num):\n for img_idx in xrange(image_num):\n for batch_idx in xrange(batch_num):\n row_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n col_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n #row_idx = np.arange(0, input_size) + 200\n #col_idx = np.arange(0, input_size) + 200\n\n batch_data = imgs[img_idx][row_idx][:,col_idx]\n batch_data = batch_data - batch_data.mean()\n batch_data = np.asarray(batch_data.reshape(1, input_size * input_size), dtype=\"float32\")\n \n libnvcrbm.run_batch(trial_idx, img_idx, batch_idx, batch_data)\n\n libnvcrbm.print_result()\n cur_filters = libnvcrbm.get_gpu_filters()\n dump_filter_image(cur_filters, \"../data/kyoto/filters/trial_%d.png\" % trial_idx)\n\n first_layer = {}\n first_layer[\"filters\"] = cur_filters\n first_layer[\"bias\"] = libnvcrbm.get_gpu_hbias()\n cPickle.dump(first_layer, open(\"../data/first_layer.dat\", \"w+\"))",
"def block_inception_c(blk, net):\n # By default use stride=1 and SAME padding\n s = net.add(Split('%s/Split' % blk, 4))\n br0 = conv2d(net, '%s/Branch_0/Conv2d_0a_1x1' % blk, 256, 1, src=s)\n\n br1 = conv2d(net, '%s/Branch_1/Conv2d_0a_1x1' % blk, 384, 1, src=s)\n br1 = net.add(Split('%s/Branch_1/Split' % blk, 2))\n br10 = conv2d(net, '%s/Branch_1/Conv2d_0b_1x3' % blk, 256, (1, 3), src=br1)\n br11 = conv2d(net, '%s/Branch_1/Conv2d_0c_3x1' % blk, 256, (3, 1), src=br1)\n br1 = net.add(Concat('%s/Branch_1/Concat' % blk, 1), [br10, br11])\n\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0a_1x1' % blk, 384, 1, src=s)\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0b_3x1' % blk, 448, (3, 1))\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0c_1x3' % blk, 512, (1, 3))\n br2 = net.add(Split('%s/Branch_2/Split' % blk, 2))\n br20 = conv2d(net, '%s/Branch_2/Conv2d_0d_1x3' % blk, 256, (1, 3), src=br2)\n br21 = conv2d(net, '%s/Branch_2/Conv2d_0e_3x1' % blk, 256, (3, 1), src=br2)\n br2 = net.add(Concat('%s/Branch_2/Concat' % blk, 1), [br20, br21])\n\n br3 = net.add(AvgPooling2D('%s/Branch_3/AvgPool_0a_3x3' % blk, 3, 1), s)\n br3 = conv2d(net, '%s/Branch_3/Conv2d_0b_1x1' % blk, 256, 1)\n return net.add(Concat('%s/Concat' % blk, 1), [br0, br1, br2, br3])",
"def subpel_conv3x3(in_ch, out_ch, r=1):\n # return nn.Sequential(\n # nn.Conv2d(in_ch, out_ch * r**2, kernel_size=3, padding=1),\n # nn.PixelShuffle(r))\n # change\n return nn.SequentialCell([nn.Conv2d(in_ch, out_ch * r**2, kernel_size=3, padding=1),\n # # TODO BY J : no pixel shuffle in mindspore\n # nn.PixelShuffle(r))\n ])",
"def __init__(self, block, layers, groups, reduction, dropout_p=0.2, in_channels=3,\n inplanes=128, input_3x3=True, downsample_kernel_size=3,\n downsample_padding=1, num_classes=1000):\n super(SENet, self).__init__()\n \n self.in_channels = in_channels\n self.inplanes = inplanes\n\n if input_3x3:\n layer0_modules = [\n ('conv1', nn.Conv2d(in_channels, 64, 3, stride=2, padding=1,\n bias=False)),\n ('bn1', nn.BatchNorm2d(64)),\n ('relu1', nn.ReLU(inplace=True)),\n ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1,\n bias=False)),\n ('bn2', nn.BatchNorm2d(64)),\n ('relu2', nn.ReLU(inplace=True)),\n ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1,\n bias=False)),\n ('bn3', nn.BatchNorm2d(inplanes)),\n ('relu3', nn.ReLU(inplace=True)),\n ]\n else:\n layer0_modules = [\n ('conv1', nn.Conv2d(in_channels, inplanes, kernel_size=7, stride=2,\n padding=3, bias=False)),\n ('bn1', nn.BatchNorm2d(inplanes)),\n ('relu1', nn.ReLU(inplace=True)),\n ]\n\n layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2, ceil_mode=True)))\n \n self.layer0 = nn.Sequential(OrderedDict(layer0_modules))\n \n self.layer1 = self._make_layer(\n block,\n planes=64,\n blocks=layers[0],\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=1,\n downsample_padding=0\n )\n \n self.layer2 = self._make_layer(\n block,\n planes=128,\n blocks=layers[1],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.layer3 = self._make_layer(\n block,\n planes=256,\n blocks=layers[2],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.layer4 = self._make_layer(\n block,\n planes=512,\n blocks=layers[3],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.avg_pool = nn.AvgPool2d(7, stride=1)\n self.dropout = nn.Dropout2d(dropout_p) if dropout_p is not None else None\n self.last_linear = nn.Linear(512 * block.expansion, num_classes)",
"def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_DIM)\n\tshape = (DIM,DIM,N)\n\tstitch_grid_size = (DIM/(block_size[0]),\n\t\t\t\t\t\tDIM/(block_size[0]),\n\t\t\t\t\t\tN/(block_size[0]))\n\tHII_stitch_grid_size = (HII_DIM/(block_size[0]),\n\t\t\t\t\t\tHII_DIM/(block_size[0]),\n\t\t\t\t\t\tM/(block_size[0]))\n\t#ratio of large box to small size\n\tkernel_source = open(cmd_folder+\"/initialize_stitch.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'DIM': DIM, \n\t\t'VOLUME': VOLUME,\n\t\t'META_BLOCKDIM': N\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_stitch = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\tplan2d = Plan((np.int64(DIM), np.int64(DIM)), dtype=np.complex64)\n\tplan1d = Plan((np.int64(DIM)), dtype=np.complex64)\n\tprint \"init pspec\"\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\t#hbox_large = pyfftw.empty_aligned((DIM, DIM, DIM), dtype='complex64')\n\thbox_large = np.zeros((DIM, DIM, DIM), dtype=np.complex64)\n\t#hbox_small = np.zeros(HII_shape, dtype=np.float32)\n\t#hbox_large = n\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\n\t# Set up pinned memory for transfer\n\t#largebox_hs = cuda.aligned_empty(shape=shape, dtype=np.float32, alignment=resource.getpagesize())\n\tlargebox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.float32)\n\tlargecbox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.complex64)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tprint \"init boxes\"\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t# MRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=meta_x*N**3)\n\t\tinit_stitch(largebox_d, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tinit_stitch(largebox_d_imag, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largecbox_pin.copy()\n\t#if want to get velocity need to use this\n\tif True:\n\t\tprint \"saving kbox\"\n\t\tnp.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\n\tprint \"Executing FFT on device\"\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint hbox_large.dtype\n\tprint \"Finished FFT on device\"\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\t\n\tif True:\n\t\tprint \"loading kbox\"\n\t\thbox_large = 
np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\t#cuda.memcpy_htod_async(largebox_d, largebox_pin)\n\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tHII_filter(largebox_d, DIM, np.int32(meta_z), ZERO, smoothR, block=block_size, grid=stitch_grid_size);\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largebox_d.get_async()\n\t#import IPython; IPython.embed()\n\tprint \"Executing FFT on host\"\n\t#hbox_large = hifft(hbox_large).astype(np.complex64).real\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint \"Finished FFT on host\"\n\t#import IPython; IPython.embed()\n\n\t# for meta_x in xrange(META_GRID_SIZE):\n\t# \tfor meta_y in xrange(META_GRID_SIZE):\n\t# \t\tfor meta_z in xrange(META_GRID_SIZE):\n\t# \t\t\tlargebox_d = gpuarray.to_gpu(hbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N])\n\t# \t\t\tHII_filter(largebox_d, N, np.int32(meta_x), np.int32(meta_y), np.int32(meta_z), ZERO, smoothR, block=block_size, grid=grid_size);\n\t# \t\t\thbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N] = largebox_d.get()\n\t#plan = Plan(shape, dtype=np.complex64)\n\t#plan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\n\n\t# This saves a large resolution deltax\n\n\t\n\tprint \"downsampling\"\n\tsmallbox_d = gpuarray.zeros((HII_DIM,HII_DIM,M), dtype=np.float32)\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\t#largebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tlargebox_d /= scale**3 #\n\t\tsubsample_kernel(largebox_d, smallbox_d, DIM, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size) #subsample in real space\n\t\thbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallbox_d.get_async()\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), hbox_small)\n\t#import IPython; IPython.embed()\n\n\n\t# To get velocities: reload the k-space box\n\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\thvbox_large = np.zeros((DIM, DIM, DIM), dtype=np.float32)\n\thvbox_small = np.zeros(HII_shape, dtype=np.float32)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,N), dtype=np.complex64)\n\tsmallvbox_d = gpuarray.zeros((HII_DIM, HII_DIM, M), dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\t\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(meta_z), np.int32(num), block=block_size, grid=stitch_grid_size)\n\t\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=stitch_grid_size)\n\t\t\tprint hvbox_large.shape, largevbox_d.shape\n\t\t\thvbox_large[:, :, meta_z*N:(meta_z+1)*N] = largevbox_d.get_async()\n\t\thvbox_large = fft_stitch(N, plan2d, plan1d, hvbox_large, largevbox_d).real\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargevbox_d = 
gpuarray.to_gpu_async(hvbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\tsubsample_kernel(largevbox_d.real, smallvbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size)\n\t\t\thvbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallvbox_d.get_async()\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallvbox_d.get())\n\n\treturn",
"def __init__(self, name=\"conv_3d_lstm_cell\", **kwargs):\n super(Conv3DLSTMCell, self).__init__(conv_ndims=3, name=name, **kwargs)",
"def mobile_net(\n num_classes=1000,\n data_shape=(1, 3, 224, 224),\n dtype=\"float32\",\n alpha=1.0,\n is_shallow=False,\n layout=\"NCHW\",\n):\n data = relay.var(\"data\", shape=data_shape, dtype=dtype)\n body = conv_block(data, \"conv_block_1\", int(32 * alpha), strides=(2, 2), layout=layout)\n body = separable_conv_block(\n body, \"separable_conv_block_1\", int(32 * alpha), int(64 * alpha), layout=layout, dtype=dtype\n )\n body = separable_conv_block(\n body,\n \"separable_conv_block_2\",\n int(64 * alpha),\n int(128 * alpha),\n downsample=True,\n layout=layout,\n dtype=dtype,\n )\n body = separable_conv_block(\n body,\n \"separable_conv_block_3\",\n int(128 * alpha),\n int(128 * alpha),\n layout=layout,\n dtype=dtype,\n )\n body = separable_conv_block(\n body,\n \"separable_conv_block_4\",\n int(128 * alpha),\n int(256 * alpha),\n downsample=True,\n layout=layout,\n dtype=dtype,\n )\n body = separable_conv_block(\n body,\n \"separable_conv_block_5\",\n int(256 * alpha),\n int(256 * alpha),\n layout=layout,\n dtype=dtype,\n )\n body = separable_conv_block(\n body,\n \"separable_conv_block_6\",\n int(256 * alpha),\n int(512 * alpha),\n downsample=True,\n layout=layout,\n dtype=dtype,\n )\n if is_shallow:\n body = separable_conv_block(\n body,\n \"separable_conv_block_7\",\n int(512 * alpha),\n int(1024 * alpha),\n downsample=True,\n layout=layout,\n dtype=dtype,\n )\n body = separable_conv_block(\n body,\n \"separable_conv_block_8\",\n int(1024 * alpha),\n int(1024 * alpha),\n downsample=True,\n layout=layout,\n dtype=dtype,\n )\n else:\n for i in range(7, 12):\n body = separable_conv_block(\n body,\n f\"separable_conv_block_{i}\",\n int(512 * alpha),\n int(512 * alpha),\n layout=layout,\n dtype=dtype,\n )\n body = separable_conv_block(\n body,\n \"separable_conv_block_12\",\n int(512 * alpha),\n int(1024 * alpha),\n downsample=True,\n layout=layout,\n dtype=dtype,\n )\n body = separable_conv_block(\n body,\n \"separable_conv_block_13\",\n int(1024 * alpha),\n int(1024 * alpha),\n layout=layout,\n dtype=dtype,\n )\n pool = relay.nn.global_avg_pool2d(data=body, layout=layout)\n flatten = relay.nn.batch_flatten(data=pool)\n weight = relay.var(\"fc_weight\")\n bias = relay.var(\"fc_bias\")\n fc = relay.nn.dense(data=flatten, weight=weight, units=num_classes)\n fc = relay.nn.bias_add(fc, bias)\n softmax = relay.nn.softmax(data=fc)\n return relay.Function(relay.analysis.free_vars(softmax), softmax)"
] | [
"0.65041816",
"0.6349424",
"0.6302337",
"0.6283698",
"0.6271896",
"0.6270381",
"0.62482214",
"0.6165075",
"0.61650103",
"0.6118518",
"0.6074425",
"0.60461146",
"0.5950486",
"0.5944868",
"0.592626",
"0.5922896",
"0.5890219",
"0.5874022",
"0.5871335",
"0.5861999",
"0.5842724",
"0.5840554",
"0.58123404",
"0.5804285",
"0.57960725",
"0.5792384",
"0.5791556",
"0.5774564",
"0.57649755",
"0.57523155"
] | 0.6551719 | 0 |
mnasneta1 w.t. 3x3MBconv3SE block only | def mnasneta1_3x3mbconv3se(pretrained=False, progress=False, **kwargs):
return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],
kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[True, True, True, True, True, True],
dropout=0, pretrained=pretrained, progress=progress, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def inception_block_1a(X):\n\n X_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name='inception_3a_3x3_conv1')(X)\n X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn1')(X_3x3)\n X_3x3 = Activation('relu')(X_3x3)\n X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)\n X_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3a_3x3_conv2')(X_3x3)\n X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn2')(X_3x3)\n X_3x3 = Activation('relu')(X_3x3)\n\n X_5x5 = Conv2D(16, (1, 1), data_format='channels_first', name='inception_3a_5x5_conv1')(X)\n X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn1')(X_5x5)\n X_5x5 = Activation('relu')(X_5x5)\n X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)\n X_5x5 = Conv2D(32, (5, 5), data_format='channels_first', name='inception_3a_5x5_conv2')(X_5x5)\n X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn2')(X_5x5)\n X_5x5 = Activation('relu')(X_5x5)\n\n X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)\n X_pool = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3a_pool_conv')(X_pool)\n X_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_pool_bn')(X_pool)\n X_pool = Activation('relu')(X_pool)\n X_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool)\n\n X_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3a_1x1_conv')(X)\n X_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_1x1_bn')(X_1x1)\n X_1x1 = Activation('relu')(X_1x1)\n\n # CONCAT\n inception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)\n\n return inception",
"def inception_block_1a(X):\n\tX_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name ='inception_3a_3x3_conv1')(X)\n\tX_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name = 'inception_3a_3x3_bn1')(X_3x3)\n\tX_3x3 = Activation('relu')(X_3x3)\n\tX_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)\n\tX_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3a_3x3_conv2')(X_3x3)\n\tX_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn2')(X_3x3)\n\tX_3x3 = Activation('relu')(X_3x3)\n\tX_5x5 = Conv2D(16, (1, 1), data_format='channels_first', name='inception_3a_5x5_conv1')(X)\n\tX_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn1')(X_5x5)\n\tX_5x5 = Activation('relu')(X_5x5)\n\tX_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)\n\tX_5x5 = Conv2D(32, (5, 5), data_format='channels_first', name='inception_3a_5x5_conv2')(X_5x5)\n\tX_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn2')(X_5x5)\n\tX_5x5 = Activation('relu')(X_5x5)\n\tX_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)\n\tX_pool = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3a_pool_conv')(X_pool)\n\tX_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_pool_bn')(X_pool)\n\tX_pool = Activation('relu')(X_pool)\n\tX_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool)\n\tX_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3a_1x1_conv')(X)\n\tX_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_1x1_bn')(X_1x1)\n\tX_1x1 = Activation('relu')(X_1x1)\n\t# CONCAT\n\tinception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)\n\treturn inception",
"def mnasneta1_3x3mbconv3(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def mnasneta1_5x5mbconv3se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def mobilenetv2 (inputs, k, alpha = 1.0, train_bn = False):\n\n x = conv_block(inputs, 32, alpha, (3, 3), strides=(2, 2), block_id=0, train_bn=train_bn) # Input Res: 1\n\n x = inverted_residual_block(x, 16, (3, 3), t=1, strides=1, n=1, alpha=1.0, block_id=1, train_bn=train_bn)\t# Input Res: 1/2\n x = inverted_residual_block(x, 24, (3, 3), t=6, strides=2, n=2, alpha=1.0, block_id=2, train_bn=train_bn)\t# Input Res: 1/2\n x = inverted_residual_block(x, 32, (3, 3), t=6, strides=2, n=3, alpha=1.0, block_id=4, train_bn=train_bn)\t# Input Res: 1/4\n x = inverted_residual_block(x, 64, (3, 3), t=6, strides=2, n=4, alpha=1.0, block_id=7, train_bn=train_bn)\t# Input Res: 1/8\n x = inverted_residual_block(x, 96, (3, 3), t=6, strides=1, n=3, alpha=1.0, block_id=11, train_bn=train_bn)\t# Input Res: 1/8\n x = inverted_residual_block(x, 160, (3, 3), t=6, strides=2, n=3, alpha=1.0, block_id=14, train_bn=train_bn)\t# Input Res: 1/16\n x = inverted_residual_block(x, 320, (3, 3), t=6, strides=1, n=1, alpha=1.0, block_id=17, train_bn=train_bn)\t# Input Res: 1/32\n\n x = conv_block(x, 1280, alpha, (1, 1), strides=(1, 1), block_id=18, train_bn=train_bn) # Input Res: 1/32\n\n x = KL.GlobalAveragePooling2D()(x)\n x = KL.Reshape((1, 1, 1280))(x)\n x = KL.Dropout(0.3, name='Dropout')(x)\n x = KL.Conv2D(k, (1, 1), padding='same')(x)\n\n x = KL.Activation('softmax', name='softmax')(x)\n output = KL.Reshape((k,))(x)\n\n model = KM.Model(inputs, output)\n plot_model(model, to_file='MobileNetv2.png', show_shapes=True)\n\n return model",
"def __init__(self, num_in, num_out, g=1, stride=1, d=(1,1),norm=None):\r\n super(MFunit, self).__init__()\r\n num_mid = num_in if num_in <= num_out else num_out\r\n self.conv1x1x1_in1 = Conv3d_Block(num_in,num_in//4,kernel_size=1,stride=1,norm=norm)\r\n self.conv1x1x1_in2 = Conv3d_Block(num_in//4,num_mid,kernel_size=1,stride=1,norm=norm)\r\n self.conv3x3x3_m1 = DilatedConv3DBlock(num_mid,num_out,kernel_size=(3,3,3),stride=stride,g=g,d=(d[0],d[0],d[0]),norm=norm) # dilated\r\n self.conv3x3x3_m2 = DilatedConv3DBlock(num_out,num_out,kernel_size=(3,3,1),stride=1,g=g,d=(d[1],d[1],1),norm=norm)\r\n # self.conv3x3x3_m2 = DilatedConv3DBlock(num_out,num_out,kernel_size=(1,3,3),stride=1,g=g,d=(1,d[1],d[1]),norm=norm)\r\n\r\n # skip connection\r\n if num_in != num_out or stride != 1:\r\n if stride == 1:\r\n self.conv1x1x1_shortcut = Conv3d_Block(num_in, num_out, kernel_size=1, stride=1, padding=0,norm=norm)\r\n if stride == 2:\r\n # if MF block with stride=2, 2x2x2\r\n self.conv2x2x2_shortcut = Conv3d_Block(num_in, num_out, kernel_size=2, stride=2,padding=0, norm=norm) # params\r",
"def Unet4(shape, nb_filters=32, exp=1, kernel_size=3, initialization=\"glorot_uniform\", activation=\"relu\", sigma_noise=0, output_channels=1, drop=0.0, regularization=None):\n \n \n input_layer = Input(shape=shape)\n\n conv1 = ConvBlock(input_layer, nb_filters=nb_filters, kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n if drop > 0.0: pool1 = Dropout(drop)(pool1)\n\n conv2 = ConvBlock(pool1, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n if drop > 0.0: pool2 = Dropout(drop)(pool2)\n\n conv3 = ConvBlock(pool2, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n if drop > 0.0: pool3 = Dropout(drop)(pool3)\n\n conv4 = ConvBlock(pool3, nb_filters=nb_filters * 2 **(3 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n if drop > 0.0: pool4 = Dropout(drop)(pool4)\n\n deconv5 = DeconvBlock(conv4, residual=conv3, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv5 = Dropout(drop)(deconv5)\n\n deconv6 = DeconvBlock(deconv5, residual=conv2, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv6 = Dropout(drop)(deconv6)\n\n deconv7 = DeconvBlock(deconv6, residual=conv1, nb_filters=nb_filters, kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv7 = Dropout(drop)(deconv7)\n\n if sigma_noise > 0:\n deconv7 = GaussianNoise(sigma_noise)(deconv7)\n\n output_layer = Conv2D(filters=output_channels, kernel_size=(1, 1))(deconv7)\n output_layer = BatchNormalization()(output_layer)\n output_layer = Activation('softmax')(output_layer)\n\n model = Model(inputs=input_layer, outputs=output_layer, name='Unet')\n return model",
"def mnasneta1_3x3mbconv6se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def mnasneta1_5x5mbconv3(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def __init__(self):\n super(Encoder3, self).__init__()\n self.lblocks = nn.ModuleList(\n [\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n ]\n )\n\n self.blocks = nn.ModuleList(\n [\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n ]\n )",
"def mnasneta1_3x3mbconv6(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def vecvari1(array,W,B=None,sqrt=False,BB=False,BS=False,verbose=False,sizz=1,\r\n KCD=False,mulb=False,mul2=False,v3=0,**kwargs):\r\n \r\n arrs=array.shape\r\n #array=np.expand_dims(array,len(array.shape)//2)\r\n ashp=W.shape\r\n dstp=arrs[0]-1 if not((arrs[0]-1)==0) else 1\r\n if verbose:\r\n print(\"VECVARI1:: B? {},SQRT {}, BB {}, BS {}, SIZZ {}, KCD {}, MULB {}, MUL2 {}\".format(\r\n not(B is None),bool(sqrt),bool(BB),bool(BS),sizz,bool(KCD),bool(mulb),bool(mul2)))\r\n print('arrayshape',arrs)\r\n if verbose==2:\r\n print('Wsample',W[:,:,-1,-1])\r\n else:\r\n print('Wsample',W[:,:,-1,-1])\r\n if not(B is None):\r\n print(\"Bsamp\",B)\r\n print('wshape',ashp)\r\n if B is None:\r\n B=np.zeros((1,1,1,1),dtype=np.float32)#channel\r\n bt=len(B.shape)==2\r\n xi=(-2,-1)#xi=(-1,-2)\r\n x2=(-3,-2,-1)\r\n if len(ashp)==5 :#not all data and all weights == 3d data\r\n xi=(-3,-2,-1)\r\n x2=(-4,-3,-2,-1)\r\n if v3:\r\n if mulb:#probably a bad idea\r\n mul=array+B\r\n else:\r\n mul=array\r\n else:\r\n if mulb:#probably a bad idea\r\n B=np.reshape(B,(*B.shape,*[1 for _ in range(len(ashp)-len(B.shape))]))\r\n mul=(array*W)+B\r\n else:\r\n mul=array*W\r\n size=np.sum(W,axis=xi,keepdims=True)#shape=(outputs, channel)\r\n\r\n if BB :\r\n B=np.reshape(B,(*B.shape,*[1 for _ in range(len(ashp)-len(B.shape))]))\r\n if verbose:\r\n if verbose==2:\r\n print('mulsamp',mul[:,-1,-1,::dstp],'arrsamp',array[-1,-1,:])\r\n else:\r\n print('mulsamp',mul[-1,-1,-1],'arrsamp',array[-1,-1,-1])\r\n print('sizsamp',size)\r\n print('bbb',B.shape)\r\n print(\"size\",size.shape)\r\n if sizz==1:#not a good idea\r\n mean=np.sum((mul),axis=xi,keepdims=True)/size\r\n else:\r\n mean=np.sum((mul),axis=xi,keepdims=True)/np.broadcast_to([ashp[-2]*ashp[-1]],(ashp[1],1,1))\r\n if verbose:\r\n if verbose==2:\r\n print(\"meanshape\",mean.shape)\r\n print(\"meansamp\",mean[:,:,:,::dstp,-1,-1,-1])\r\n else:\r\n print(\"meansamp\",mean[-1,:,:,-1,-1,-1,-1])\r\n print(\"etst\",mean.shape)\r\n if verbose==2:\r\n print(\"ameanshp\",(mul-mean).shape)\r\n print(\"amean\",(mul-mean)[:,:,:,::dstp,-1,-1])\r\n else:\r\n print(\"amean\",(mul-mean)[-1,-1,-1])\r\n if mul2:\r\n if mulb:#probably a bad idea\r\n mul=((array-mean)*W)+B\r\n else:\r\n mul=((array-mean)*W)\r\n i=(np.square(mul))/size\r\n else:\r\n if v3==1:\r\n if BB:\r\n i=(np.square(((array-mean)*W)+B)/size)#B could be included\r\n else:\r\n i=(np.square(((array-mean)*W))/size)#B could be included\r\n if v3==2:#not a good idea\r\n if BB:\r\n i=((np.square(array-mean)*W)+B)/size#B could be included\r\n else:\r\n i=((np.square(array-mean)*W))/size#B could be included\r\n if v3==3:\r\n if BB:\r\n i=((np.square(array-mean)/size)*W)+B#B could be included\r\n else:\r\n i=((np.square(array-mean)/size)*W)#B could be included\r\n else:\r\n if BB:\r\n i=(np.square((mul)-mean)+B)/size\r\n else:\r\n i=(np.square((mul)-mean))/size\r\n if KCD:\r\n out=np.sum(i,axis=xi)\r\n else:\r\n out=np.rollaxis(np.sum(i,axis=x2),-1,1)\r\n if verbose:\r\n print(i.shape)\r\n if verbose==2:\r\n print('ishp',i.shape)\r\n print('isample',i[:,-1,-1,::dstp],i.dtype)\r\n else:\r\n print('isample',i[-1,-1,-1],i.dtype)\r\n if sqrt:\r\n out=np.sqrt(out)\r\n if verbose:\r\n if verbose==2:\r\n print('oushp',out.shape)\r\n print(\"outsample\",out[:,::dstp,-1,-1])\r\n else:\r\n print(\"outsample\",out[-1,-1,-1])\r\n print(\"out\",out.shape,(arrs[0],ashp[0],arrs[1],arrs[2]))\r\n if KCD:\r\n out=np.reshape(out,(arrs[0],ashp[0]*arrs[-3],arrs[1],arrs[2]))\r\n else:\r\n assert out.shape==(arrs[0],ashp[0],arrs[1],arrs[2])\r\n if 
not(BB)and BS:\r\n B=np.reshape(B,(*B.shape,*[1 for _ in range(len(ashp)-len(B.shape))]))\r\n return(out+B[:,0])\r\n else:\r\n return(out)",
"def __init__(self, block, layers, groups, reduction, dropout_p=0.2, in_channels=3,\n inplanes=128, input_3x3=True, downsample_kernel_size=3,\n downsample_padding=1, num_classes=1000):\n super(SENet, self).__init__()\n \n self.in_channels = in_channels\n self.inplanes = inplanes\n\n if input_3x3:\n layer0_modules = [\n ('conv1', nn.Conv2d(in_channels, 64, 3, stride=2, padding=1,\n bias=False)),\n ('bn1', nn.BatchNorm2d(64)),\n ('relu1', nn.ReLU(inplace=True)),\n ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1,\n bias=False)),\n ('bn2', nn.BatchNorm2d(64)),\n ('relu2', nn.ReLU(inplace=True)),\n ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1,\n bias=False)),\n ('bn3', nn.BatchNorm2d(inplanes)),\n ('relu3', nn.ReLU(inplace=True)),\n ]\n else:\n layer0_modules = [\n ('conv1', nn.Conv2d(in_channels, inplanes, kernel_size=7, stride=2,\n padding=3, bias=False)),\n ('bn1', nn.BatchNorm2d(inplanes)),\n ('relu1', nn.ReLU(inplace=True)),\n ]\n\n layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2, ceil_mode=True)))\n \n self.layer0 = nn.Sequential(OrderedDict(layer0_modules))\n \n self.layer1 = self._make_layer(\n block,\n planes=64,\n blocks=layers[0],\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=1,\n downsample_padding=0\n )\n \n self.layer2 = self._make_layer(\n block,\n planes=128,\n blocks=layers[1],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.layer3 = self._make_layer(\n block,\n planes=256,\n blocks=layers[2],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.layer4 = self._make_layer(\n block,\n planes=512,\n blocks=layers[3],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.avg_pool = nn.AvgPool2d(7, stride=1)\n self.dropout = nn.Dropout2d(dropout_p) if dropout_p is not None else None\n self.last_linear = nn.Linear(512 * block.expansion, num_classes)",
"def c3d(self):\n model = Sequential()\n # 1st layer group\n model.add(Conv3D(64, 3, 3, 3, activation='relu',\n border_mode='same', name='conv1',\n subsample=(1, 1, 1),\n input_shape=self.input_shape))\n model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),\n border_mode='valid', name='pool1'))\n # 2nd layer group\n model.add(Conv3D(128, 3, 3, 3, activation='relu',\n border_mode='same', name='conv2',\n subsample=(1, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='valid', name='pool2'))\n # 3rd layer group\n model.add(Conv3D(256, 3, 3, 3, activation='relu',\n border_mode='same', name='conv3a',\n subsample=(1, 1, 1)))\n model.add(Conv3D(256, 3, 3, 3, activation='relu',\n border_mode='same', name='conv3b',\n subsample=(1, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='valid', name='pool3'))\n # 4th layer group\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv4a',\n subsample=(1, 1, 1)))\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv4b',\n subsample=(1, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='valid', name='pool4'))\n\n # 5th layer group\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv5a',\n subsample=(1, 1, 1)))\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv5b',\n subsample=(1, 1, 1)))\n model.add(ZeroPadding3D(padding=(0, 1, 1)))\n # model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n # border_mode='valid', name='pool5', dim_ordering=\"tf\"))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='same', name='pool5', dim_ordering=\"tf\"))\n model.add(Flatten())\n\n # FC layers group\n model.add(Dense(4096, activation='relu', name='fc6'))\n model.add(Dropout(0.5))\n model.add(Dense(4096, activation='relu', name='fc7'))\n model.add(Dropout(0.5))\n model.add(Dense(self.nb_classes, activation='softmax'))\n for layer in model.layers:\n print(layer.output_shape)\n return model",
"def block3(\n x,\n filters,\n kernel_size=3,\n stride=1,\n groups=32,\n conv_shortcut=True,\n name='',\n norm_use=\"bn\",\n):\n if conv_shortcut is True:\n shortcut = layers.Conv2D(\n (64 // groups) * filters,\n 1,\n strides=stride,\n use_bias=False,\n name=name + '_0_conv',\n )(x)\n shortcut = normalize_layer(shortcut, norm_use=norm_use, name=name + '_0_')\n else:\n shortcut = x\n\n x = layers.Conv2D(\n filters,\n 1,\n use_bias=False,\n name=name + '_1_conv',\n kernel_initializer='he_normal',\n )(x)\n x = normalize_layer(x, norm_use=norm_use, name=name + '_1_')\n x = layers.Activation('relu', name=name + '_1_relu')(x)\n\n c = filters // groups\n x = layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name=name + '_2_pad')(x)\n x = layers.DepthwiseConv2D(\n kernel_size,\n strides=stride,\n depth_multiplier=c,\n use_bias=False,\n name=name + '_2_conv',\n kernel_initializer='he_normal',\n )(x)\n x_shape = backend.int_shape(x)[1:-1]\n x = layers.Reshape(x_shape + (groups, c, c))(x)\n output_shape = x_shape + (groups, c) if backend.backend() == 'theano' else None\n x = layers.Lambda(\n lambda x: sum([x[:, :, :, :, i] for i in range(c)]),\n output_shape=output_shape,\n name=name + '_2_reduce',\n )(x)\n x = layers.Reshape(x_shape + (filters, ))(x)\n x = normalize_layer(x, norm_use=norm_use, name=name + '_2_')\n x = layers.Activation('relu', name=name + '_2_relu')(x)\n\n x = layers.Conv2D((64 // groups) * filters, 1, kernel_initializer='he_normal',\n use_bias=False, name=name + '_3_conv')(x)\n x = normalize_layer(x, norm_use=norm_use, name=name + '_3_')\n\n x = layers.Add(name=name + '_add')([shortcut, x])\n x = layers.Activation('relu', name=name + '_out')(x)\n return x",
"def c3dLSTM(self):\n model = Sequential()\n # 1st layer group\n model.add(Conv3D(64, 3, 3, 3, activation='relu',\n border_mode='same', name='conv1',\n subsample=(1, 1, 1),\n input_shape=self.input_shape))\n model.add(MaxPooling3D(pool_size=(1, 2, 2), strides=(1, 2, 2),\n border_mode='same', name='pool1'))\n # 2nd layer group\n model.add(Conv3D(128, 3, 3, 3, activation='relu',\n border_mode='same', name='conv2',\n subsample=(1, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='same', name='pool2'))\n # 3rd layer group\n model.add(Conv3D(256, 3, 3, 3, activation='relu',\n border_mode='same', name='conv3a',\n subsample=(1, 1, 1)))\n model.add(Conv3D(256, 3, 3, 3, activation='relu',\n border_mode='same', name='conv3b',\n subsample=(1, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='same', name='pool3'))\n # 4th layer groupprint(layer.get_shape())\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv4a',\n subsample=(1, 1, 1)))\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv4b',\n subsample=(1, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='same', name='pool4'))\n\n # 5th layer group\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv5a',\n subsample=(1, 1, 1)))\n model.add(Conv3D(512, 3, 3, 3, activation='relu',\n border_mode='same', name='conv5b',\n subsample=(1, 1, 1)))\n model.add(ZeroPadding3D(padding=(0, 1, 1)))\n model.add(MaxPooling3D(pool_size=(2, 2, 2), strides=(2, 2, 2),\n border_mode='same', name='pool5'))\n shaped=model.layers[13].output.get_shape()\n print(shaped[1], shaped[2], shaped[3])\n # model.add(ConvLSTM2D(512, kernel_size=(3, 3), activation='sigmoid', padding='same', input_shape=[self.seq_length,shaped[1],shaped[2],shaped[3]],\n # return_sequences=True)) GlobalAveragePooling3D\n # model.add(ConvLSTM2D(512, kernel_size=(3, 3), activation='relu', padding='valid',\n # input_shape=[1, shaped[1], shaped[2], shaped[3]],\n # return_sequences=True,kernel_regularizer=l2(0.1),recurrent_dropout=0.5))\n model.add(ConvLSTM2D(512, kernel_size=(3, 3), activation='relu', padding='valid', return_sequences=True,kernel_regularizer=l2(0.1),recurrent_dropout=0.5))\n model.add(ConvLSTM2D(640, kernel_size=(3, 3), activation='relu', padding='valid', return_sequences=True,\n kernel_regularizer=l2(0.1), recurrent_dropout=0.5))\n model.add(BatchNormalization())\n model.add(Flatten())\n #new added two dense\n model.add(Dense(4096, activation='relu', kernel_regularizer=l2(0.001)))\n model.add(Dropout(0.5))\n model.add(Dense(4096, activation='relu', kernel_regularizer=l2(0.001)))\n model.add(Dropout(0.5))\n # FC layers group\n #x = Dense(2048,activation='relu',kernel_regularizer=l2(weight_decay))(x)\n # x = Dropout(0.5)(x)\n model.add(Dense(self.nb_classes, activation='softmax'))\n\n return model",
"def __init__(self, n_x, n_z, qz_hid, px_hid, filters, seq_length=50, nonlinearity=rectify,\r\n px_nonlinearity=None, x_dist='linear', batchnorm=False, seed=1234):\r\n super(CVAE, self).__init__(n_x, qz_hid + px_hid, n_z, nonlinearity)\r\n self.x_dist = x_dist\r\n self.n_x = n_x\r\n self.seq_length = seq_length\r\n self.n_z = n_z\r\n self.batchnorm = batchnorm\r\n self._srng = RandomStreams(seed)\r\n\r\n # Pool layer cache\r\n pool_layers = []\r\n\r\n # Decide Glorot initializaiton of weights.\r\n init_w = 1e-3\r\n hid_w = \"\"\r\n if nonlinearity == rectify or nonlinearity == softplus:\r\n hid_w = \"relu\"\r\n\r\n # Define symbolic variables for theano functions.\r\n self.sym_x = T.tensor3('x') # inputs\r\n self.sym_z = T.matrix('z')\r\n self.sym_samples = T.iscalar('samples') # MC samples\r\n\r\n # Assist methods for collecting the layers\r\n def dense_layer(layer_in, n, dist_w=init.GlorotNormal, dist_b=init.Normal):\r\n dense = DenseLayer(layer_in, n, dist_w(hid_w), dist_b(init_w), None)\r\n if batchnorm:\r\n dense = bn(dense)\r\n return NonlinearityLayer(dense, self.transf)\r\n\r\n def stochastic_layer(layer_in, n, samples, nonlin=None):\r\n mu = DenseLayer(layer_in, n, init.Normal(init_w), init.Normal(init_w), nonlin)\r\n logvar = DenseLayer(layer_in, n, init.Normal(init_w), init.Normal(init_w), nonlin)\r\n return SampleLayer(mu, logvar, eq_samples=samples, iw_samples=1), mu, logvar\r\n\r\n def conv_layer(layer_in, filter, stride=(1, 1), pool=1, name='conv'):\r\n l_conv = Conv2DLayer(layer_in, num_filters=filter, filter_size=(3, 1), stride=stride, pad='full', name=name)\r\n if pool > 1:\r\n l_conv = MaxPool2DLayer(l_conv, pool_size=(pool, 1))\r\n pool_layers.append(l_conv)\r\n return l_conv\r\n\r\n # Reshape input\r\n l_x_in = InputLayer((None, seq_length, n_x), name='Input')\r\n l_x_in_reshp = ReshapeLayer(l_x_in, (-1, 1, seq_length, n_x))\r\n print(\"l_x_in_reshp\", l_x_in_reshp.output_shape)\r\n\r\n # CNN encoder implementation\r\n l_conv_enc = l_x_in_reshp\r\n for filter, stride, pool in filters:\r\n l_conv_enc = conv_layer(l_conv_enc, filter, stride, pool)\r\n print(\"l_conv_enc\", l_conv_enc.output_shape)\r\n\r\n # Pool along last 2 axes\r\n l_global_pool_enc = GlobalPoolLayer(l_conv_enc)\r\n l_enc = dense_layer(l_global_pool_enc, n_z)\r\n print(\"l_enc\", l_enc.output_shape)\r\n\r\n # Recognition q(z|x)\r\n l_qz = l_enc\r\n for hid in qz_hid:\r\n l_qz = dense_layer(l_qz, hid)\r\n l_qz, l_qz_mu, l_qz_logvar = stochastic_layer(l_qz, n_z, self.sym_samples)\r\n print(\"l_qz\", l_qz.output_shape)\r\n\r\n # Inverse pooling\r\n l_global_depool = InverseLayer(l_qz, l_global_pool_enc)\r\n print(\"l_global_depool\", l_global_depool.output_shape)\r\n\r\n # Reverse pool layer order\r\n pool_layers = pool_layers[::-1]\r\n\r\n # Decode\r\n l_deconv = l_global_depool\r\n for idx, filter in enumerate(filters[::-1]):\r\n filter, stride, pool = filter\r\n if pool > 1:\r\n l_deconv = InverseLayer(l_deconv, pool_layers[idx])\r\n l_deconv = Conv2DLayer(l_deconv, num_filters=filter, filter_size=(3, 1), stride=(stride, 1), W=init.GlorotNormal('relu'))\r\n print(\"l_deconv\", l_deconv.output_shape)\r\n\r\n # The last l_conv layer should give us the input shape\r\n l_dec = Conv2DLayer(l_deconv, num_filters=1, filter_size=(3, 1), pad='same', nonlinearity=None)\r\n print(\"l_dec\", l_dec.output_shape)\r\n\r\n # Flatten first two dimensions\r\n l_dec = ReshapeLayer(l_dec, (-1, n_x))\r\n\r\n l_px = l_dec\r\n if x_dist == 'bernoulli':\r\n l_px = DenseLayer(l_px, n_x, init.GlorotNormal(), 
init.Normal(init_w), sigmoid)\r\n elif x_dist == 'multinomial':\r\n l_px = DenseLayer(l_px, n_x, init.GlorotNormal(), init.Normal(init_w), softmax)\r\n elif x_dist == 'gaussian':\r\n l_px, l_px_mu, l_px_logvar = stochastic_layer(l_px, n_x, self.sym_samples, px_nonlinearity)\r\n elif x_dist == 'linear':\r\n l_px = DenseLayer(l_px, n_x, nonlinearity=None)\r\n\r\n # Reshape all the model layers to have the same size\r\n self.l_x_in = l_x_in\r\n\r\n self.l_qz = ReshapeLayer(l_qz, (-1, self.sym_samples, 1, n_z))\r\n self.l_qz_mu = DimshuffleLayer(l_qz_mu, (0, 'x', 'x', 1))\r\n self.l_qz_logvar = DimshuffleLayer(l_qz_logvar, (0, 'x', 'x', 1))\r\n\r\n self.l_px = DimshuffleLayer(ReshapeLayer(l_px, (-1, seq_length, self.sym_samples, 1, n_x)), (0, 2, 3, 1, 4))\r\n self.l_px_mu = DimshuffleLayer(ReshapeLayer(l_px_mu, (-1, seq_length, self.sym_samples, 1, n_x)), (0, 2, 3, 1, 4)) \\\r\n if x_dist == \"gaussian\" else None\r\n self.l_px_logvar = DimshuffleLayer(ReshapeLayer(l_px_logvar, (-1, seq_length, self.sym_samples, 1, n_x)), (0, 2, 3, 1, 4)) \\\r\n if x_dist == \"gaussian\" else None\r\n\r\n # Predefined functions\r\n inputs = {self.l_x_in: self.sym_x}\r\n outputs = get_output(l_qz, inputs, deterministic=True)\r\n self.f_qz = theano.function([self.sym_x, self.sym_samples], outputs)\r\n\r\n inputs = {l_qz: self.sym_z}\r\n outputs = get_output(self.l_px, inputs, deterministic=True).mean(axis=(1, 2))\r\n self.f_px = theano.function([self.sym_z, self.sym_samples], outputs)\r\n\r\n outputs = get_output(self.l_px_mu, inputs, deterministic=True).mean(axis=(1, 2))\r\n self.f_mu = theano.function([self.sym_z, self.sym_samples], outputs)\r\n\r\n outputs = get_output(self.l_px_logvar, inputs, deterministic=True).mean(axis=(1, 2))\r\n self.f_var = theano.function([self.sym_z, self.sym_samples], outputs)\r\n\r\n # Define model parameters\r\n self.model_params = get_all_params([self.l_px])\r\n self.trainable_model_params = get_all_params([self.l_px], trainable=True)",
"def inception_block(A_prev, filters):\n F1, F3R, F3, F5R, F5, FPP = filters\n layer_1 = K.layers.Conv2D(filters=F1,\n kernel_size=(1, 1),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_1 = layer_1(A_prev)\n layer_2 = K.layers.Conv2D(filters=F3R,\n kernel_size=(1, 1),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_2 = layer_2(A_prev)\n layer_3 = K.layers.Conv2D(filters=F3,\n kernel_size=(3, 3),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_3 = layer_3(output_2)\n layer_4 = K.layers.Conv2D(filters=F5R,\n kernel_size=(1, 1),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_4 = layer_4(A_prev)\n layer_5 = K.layers.Conv2D(filters=F5,\n kernel_size=(5, 5),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_5 = layer_5(output_4)\n layer_6 = K.layers.MaxPooling2D(pool_size=(3, 3),\n strides=(1, 1),\n padding='same')\n output_6 = layer_6(A_prev)\n layer_7 = K.layers.Conv2D(filters=FPP,\n kernel_size=(1, 1),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_7 = layer_7(output_6)\n return (K.layers.concatenate([output_1, output_3, output_5, output_7]))",
"def inception_block(A_prev, filters):\n F1, F3R, F3, F5R, F5, FPP = filters\n layer_1 = K.layers.Conv2D(filters=F1,\n kernel_size=(1, 1),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_1 = layer_1(A_prev)\n layer_2 = K.layers.Conv2D(filters=F3R,\n kernel_size=(1, 1),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_2 = layer_2(A_prev)\n layer_3 = K.layers.Conv2D(filters=F3,\n kernel_size=(3, 3),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_3 = layer_3(output_2)\n layer_4 = K.layers.Conv2D(filters=F5R,\n kernel_size=(1, 1),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_4 = layer_4(A_prev)\n layer_5 = K.layers.Conv2D(filters=F5,\n kernel_size=(5, 5),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_5 = layer_5(output_4)\n layer_6 = K.layers.MaxPooling2D(pool_size=(3, 3),\n strides=(1, 1),\n padding='same')\n output_6 = layer_6(A_prev)\n layer_7 = K.layers.Conv2D(filters=FPP,\n kernel_size=(1, 1),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_7 = layer_7(output_6)\n return (K.layers.concatenate([output_1, output_3, output_5, output_7]))",
"def inception_block(A_prev, filters):\n F1, F3R, F3, F5R, F5, FPP = filters\n\n layer1x1 = K.layers.Conv2D(F1, 1, activation='relu')(A_prev)\n\n layer3x3 = K.layers.Conv2D(F3R, 1, activation='relu')(A_prev)\n layer3x3 = K.layers.Conv2D(F3, 3, padding='same',\n activation='relu')(layer3x3)\n\n layer5x5 = K.layers.Conv2D(F5R, 1, activation='relu')(A_prev)\n layer5x5 = K.layers.Conv2D(F5, 5, padding='same',\n activation='relu')(layer5x5)\n\n pool = K.layers.MaxPool2D(3, 1, padding='same')(A_prev)\n pool = K.layers.Conv2D(FPP, 1, activation='relu')(pool)\n\n return K.layers.concatenate([layer1x1, layer3x3, layer5x5, pool])",
"def _make_conv_layers_bn_padding(self): ## 20 Convs, used for pretrained by IMAGE Net 1000 class\n conv = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=3, bias=False), # padding=3 so, output is 224.\n nn.BatchNorm2d(64),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n nn.Conv2d(64, 192, 3, padding=1, bias=False),\n nn.BatchNorm2d(192),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(192, 128, 1, bias=False), ## kernel size = 1 이므로 padding = 0(defalut)\n nn.BatchNorm2d(128),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(128, 256, 3, padding=1, bias=False), \n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1, bias=False), \n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 512, 1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(1024, 512, 1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(1024, 512, 1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True)\n )\n return conv",
"def __init__(self, name=\"conv_3d_lstm_cell\", **kwargs):\n super(Conv3DLSTMCell, self).__init__(conv_ndims=3, name=name, **kwargs)",
"def block_inception_c(blk, net):\n # By default use stride=1 and SAME padding\n s = net.add(Split('%s/Split' % blk, 4))\n br0 = conv2d(net, '%s/Branch_0/Conv2d_0a_1x1' % blk, 256, 1, src=s)\n\n br1 = conv2d(net, '%s/Branch_1/Conv2d_0a_1x1' % blk, 384, 1, src=s)\n br1 = net.add(Split('%s/Branch_1/Split' % blk, 2))\n br10 = conv2d(net, '%s/Branch_1/Conv2d_0b_1x3' % blk, 256, (1, 3), src=br1)\n br11 = conv2d(net, '%s/Branch_1/Conv2d_0c_3x1' % blk, 256, (3, 1), src=br1)\n br1 = net.add(Concat('%s/Branch_1/Concat' % blk, 1), [br10, br11])\n\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0a_1x1' % blk, 384, 1, src=s)\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0b_3x1' % blk, 448, (3, 1))\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0c_1x3' % blk, 512, (1, 3))\n br2 = net.add(Split('%s/Branch_2/Split' % blk, 2))\n br20 = conv2d(net, '%s/Branch_2/Conv2d_0d_1x3' % blk, 256, (1, 3), src=br2)\n br21 = conv2d(net, '%s/Branch_2/Conv2d_0e_3x1' % blk, 256, (3, 1), src=br2)\n br2 = net.add(Concat('%s/Branch_2/Concat' % blk, 1), [br20, br21])\n\n br3 = net.add(AvgPooling2D('%s/Branch_3/AvgPool_0a_3x3' % blk, 3, 1), s)\n br3 = conv2d(net, '%s/Branch_3/Conv2d_0b_1x1' % blk, 256, 1)\n return net.add(Concat('%s/Concat' % blk, 1), [br0, br1, br2, br3])",
"def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_DIM)\n\tshape = (DIM,DIM,N)\n\tstitch_grid_size = (DIM/(block_size[0]),\n\t\t\t\t\t\tDIM/(block_size[0]),\n\t\t\t\t\t\tN/(block_size[0]))\n\tHII_stitch_grid_size = (HII_DIM/(block_size[0]),\n\t\t\t\t\t\tHII_DIM/(block_size[0]),\n\t\t\t\t\t\tM/(block_size[0]))\n\t#ratio of large box to small size\n\tkernel_source = open(cmd_folder+\"/initialize_stitch.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'DIM': DIM, \n\t\t'VOLUME': VOLUME,\n\t\t'META_BLOCKDIM': N\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_stitch = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\tplan2d = Plan((np.int64(DIM), np.int64(DIM)), dtype=np.complex64)\n\tplan1d = Plan((np.int64(DIM)), dtype=np.complex64)\n\tprint \"init pspec\"\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\t#hbox_large = pyfftw.empty_aligned((DIM, DIM, DIM), dtype='complex64')\n\thbox_large = np.zeros((DIM, DIM, DIM), dtype=np.complex64)\n\t#hbox_small = np.zeros(HII_shape, dtype=np.float32)\n\t#hbox_large = n\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\n\t# Set up pinned memory for transfer\n\t#largebox_hs = cuda.aligned_empty(shape=shape, dtype=np.float32, alignment=resource.getpagesize())\n\tlargebox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.float32)\n\tlargecbox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.complex64)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tprint \"init boxes\"\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t# MRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=meta_x*N**3)\n\t\tinit_stitch(largebox_d, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tinit_stitch(largebox_d_imag, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largecbox_pin.copy()\n\t#if want to get velocity need to use this\n\tif True:\n\t\tprint \"saving kbox\"\n\t\tnp.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\n\tprint \"Executing FFT on device\"\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint hbox_large.dtype\n\tprint \"Finished FFT on device\"\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\t\n\tif True:\n\t\tprint \"loading kbox\"\n\t\thbox_large = 
np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\t#cuda.memcpy_htod_async(largebox_d, largebox_pin)\n\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tHII_filter(largebox_d, DIM, np.int32(meta_z), ZERO, smoothR, block=block_size, grid=stitch_grid_size);\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largebox_d.get_async()\n\t#import IPython; IPython.embed()\n\tprint \"Executing FFT on host\"\n\t#hbox_large = hifft(hbox_large).astype(np.complex64).real\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint \"Finished FFT on host\"\n\t#import IPython; IPython.embed()\n\n\t# for meta_x in xrange(META_GRID_SIZE):\n\t# \tfor meta_y in xrange(META_GRID_SIZE):\n\t# \t\tfor meta_z in xrange(META_GRID_SIZE):\n\t# \t\t\tlargebox_d = gpuarray.to_gpu(hbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N])\n\t# \t\t\tHII_filter(largebox_d, N, np.int32(meta_x), np.int32(meta_y), np.int32(meta_z), ZERO, smoothR, block=block_size, grid=grid_size);\n\t# \t\t\thbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N] = largebox_d.get()\n\t#plan = Plan(shape, dtype=np.complex64)\n\t#plan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\n\n\t# This saves a large resolution deltax\n\n\t\n\tprint \"downsampling\"\n\tsmallbox_d = gpuarray.zeros((HII_DIM,HII_DIM,M), dtype=np.float32)\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\t#largebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tlargebox_d /= scale**3 #\n\t\tsubsample_kernel(largebox_d, smallbox_d, DIM, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size) #subsample in real space\n\t\thbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallbox_d.get_async()\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), hbox_small)\n\t#import IPython; IPython.embed()\n\n\n\t# To get velocities: reload the k-space box\n\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\thvbox_large = np.zeros((DIM, DIM, DIM), dtype=np.float32)\n\thvbox_small = np.zeros(HII_shape, dtype=np.float32)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,N), dtype=np.complex64)\n\tsmallvbox_d = gpuarray.zeros((HII_DIM, HII_DIM, M), dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\t\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(meta_z), np.int32(num), block=block_size, grid=stitch_grid_size)\n\t\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=stitch_grid_size)\n\t\t\tprint hvbox_large.shape, largevbox_d.shape\n\t\t\thvbox_large[:, :, meta_z*N:(meta_z+1)*N] = largevbox_d.get_async()\n\t\thvbox_large = fft_stitch(N, plan2d, plan1d, hvbox_large, largevbox_d).real\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargevbox_d = 
gpuarray.to_gpu_async(hvbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\tsubsample_kernel(largevbox_d.real, smallvbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size)\n\t\t\thvbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallvbox_d.get_async()\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallvbox_d.get())\n\n\treturn",
"def TCN_V3(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 128\n\n config = [ \n [(1,8,128)],\n [(1,8,128)],\n [(1,8,128)],\n [(2,8,256)],\n [(1,8,256)],\n [(1,8,256)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n #dr = Dropout(dropout)(conv)\n\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model",
"def temp_ann(S_SHSTA_0, S_SHSTA_1, S_SHSTA_2, S_SHSTA_3, I_SHSTA_0, I_SHSTA_1,\n I_SHSTA_2, I_SHSTA_3, C_KSWCK_0, C_KSWCK_1, C_KSWCK_2, C_KSWCK_3):\n # Construct input array.\n x = np.array([S_SHSTA_0, S_SHSTA_1, S_SHSTA_2, S_SHSTA_3,\n I_SHSTA_0, I_SHSTA_1, I_SHSTA_2, I_SHSTA_3,\n C_KSWCK_0, C_KSWCK_1, C_KSWCK_2, C_KSWCK_3])\n # Pass through hidden layer 1.\n W1 = np.loadtxt('W1.txt')\n B1 = np.loadtxt('B1.txt')\n h1 = relu(np.dot(W1, x) + B1)\n # Pass through hidden layer 2.\n W2 = np.loadtxt('W2.txt')\n B2 = np.loadtxt('B2.txt')\n h2 = relu(np.dot(W2, h1) + B2)\n # Pass through hidden layer 3.\n W3 = np.loadtxt('W3.txt')\n B3 = np.loadtxt('B3.txt')\n h3 = relu(np.dot(W3, h2) + B3)\n # Pass through hidden layer 4.\n W4 = np.loadtxt('W4.txt')\n B4 = np.loadtxt('B4.txt')\n h4 = relu(np.dot(W4, h3) + B4)\n # Pass through output layer.\n WO = np.loadtxt('WO.txt')\n BO = np.loadtxt('BO.txt')\n y = relu(np.dot(WO, h4) + BO)\n print(y)\n # Return result.\n return y",
"def __init__(self, dropout_rate=0.0, in_channels=3):\n\n super(MaskNet, self).__init__()\n\n self.prep_block_1 = nn.Sequential(\n nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(32),\n nn.Dropout(dropout_rate),\n )\n self.prep_block_2 = nn.Sequential(\n nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(32),\n nn.Dropout(dropout_rate),\n )\n\n self.convblock1 = nn.Sequential(\n nn.Conv2d(in_channels=64, out_channels=32, kernel_size=1, padding=0),\n nn.Conv2d(in_channels=32, out_channels=64, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.Dropout(dropout_rate),\n )\n\n self.convblock2 = nn.Sequential(\n nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, padding=1),\n nn.ReLU(),\n nn.BatchNorm2d(64),\n nn.Dropout(dropout_rate),\n )\n\n self.convblock3 = nn.Sequential(\n nn.Conv2d(in_channels=64, out_channels=1, kernel_size=1, padding=0),\n )",
"def __init__(self, ):\n super().__init__()\n channels = 3\n\n # Initial convolution block\n out_features = 64\n # encoder\n self.input = nn.Sequential(\n nn.ReflectionPad2d(channels),\n nn.Conv2d(3, out_features, (7, 7)),\n nn.InstanceNorm2d(out_features),\n nn.MaxPool2d(2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(out_features, out_features * 2, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n nn.MaxPool2d(2),\n nn.Conv2d(out_features * 2, out_features * 4, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n )\n\n self.in0 = nn.InstanceNorm2d(256)\n self.block0 = blocks()\n self.block1 = blocks()\n self.block2 = blocks()\n self.block3 = blocks()\n self.block4 = blocks()\n self.block5 = blocks()\n self.block6 = blocks()\n self.block7 = blocks()\n\n self.out = nn.Sequential(\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 4, out_features * 2, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 2, out_features, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.ReflectionPad2d(channels),\n nn.Conv2d(out_features, channels, 7),\n nn.Tanh(),\n )",
"def test_unet_3d(self):\n for model_class in [UNet3D, UNetPlus3D]:\n b, d, h, w = 4, 8, 64, 64\n in_channel, out_channel = 1, 3\n x = torch.rand(b, in_channel, d, h, w)\n model = model_class(block_type='residual', in_channel=in_channel,\n out_channel=out_channel, pooling=True)\n out = model(x)\n self.assertTupleEqual(tuple(out.shape), (b, out_channel, d, h, w))\n\n b, d, h, w = 4, 9, 65, 65\n in_channel, out_channel = 1, 2\n x = torch.rand(b, in_channel, d, h, w)\n model = model_class(block_type='residual_se', in_channel=in_channel,\n out_channel=out_channel, pooling=False)\n out = model(x)\n self.assertTupleEqual(tuple(out.shape), (b, out_channel, d, h, w))\n\n b, d, h, w = 1, 65, 65, 65\n in_channel, out_channel = 1, 2\n x = torch.rand(b, in_channel, d, h, w)\n model = model_class(block_type='residual_se', in_channel=in_channel,\n out_channel=out_channel, pooling=False, is_isotropic=True)\n out = model(x)\n self.assertTupleEqual(tuple(out.shape), (b, out_channel, d, h, w))",
"def frame3dlin_KeMe(E,G,Kv1,Kv2,A1,A2,Iy1,Iy2,Iz1,Iz2,L,me1,me2,R=None):\n # --- Stifness matrix\n ke = np.array([\n [((A2+A1)*E)/(2*L) , 0 , 0 , 0 , 0 , 0 , -((A2+A1)*E)/(2*L) , 0 , 0 , 0 , 0 , 0] , \n [0 , ((6*Iz2+6*Iz1)*E)/L**3 , 0 , 0 , 0 , ((2*Iz2+4*Iz1)*E)/L**2 , 0 , -((6*Iz2+6*Iz1)*E)/L**3 , 0 , 0 , 0 , ((4*Iz2+2*Iz1)*E)/L**2] , \n [0 , 0 , ((6*Iy2+6*Iy1)*E)/L**3 , 0 , -((2*Iy2+4*Iy1)*E)/L**2 , 0 , 0 , 0 , -((6*Iy2+6*Iy1)*E)/L**3 , 0 , -((4*Iy2+2*Iy1)*E)/L**2 , 0] , \n [0 , 0 , 0 , ((Kv2+Kv1)*G)/(2*L) , 0 , 0 , 0 , 0 , 0 , -((Kv2+Kv1)*G)/(2*L) , 0 , 0] , \n [0 , 0 , -((2*Iy2+4*Iy1)*E)/L**2 , 0 , ((Iy2+3*Iy1)*E)/L , 0 , 0 , 0 , ((2*Iy2+4*Iy1)*E)/L**2 , 0 , ((Iy2+Iy1)*E)/L , 0] , \n [0 , ((2*Iz2+4*Iz1)*E)/L**2 , 0 , 0 , 0 , ((Iz2+3*Iz1)*E)/L , 0 , -((2*Iz2+4*Iz1)*E)/L**2 , 0 , 0 , 0 , ((Iz2+Iz1)*E)/L] , \n [-((A2+A1)*E)/(2*L) , 0 , 0 , 0 , 0 , 0 , ((A2+A1)*E)/(2*L) , 0 , 0 , 0 , 0 , 0] , \n [0 , -((6*Iz2+6*Iz1)*E)/L**3 , 0 , 0 , 0 , -((2*Iz2+4*Iz1)*E)/L**2 , 0 , ((6*Iz2+6*Iz1)*E)/L**3 , 0 , 0 , 0 , -((4*Iz2+2*Iz1)*E)/L**2] , \n [0 , 0 , -((6*Iy2+6*Iy1)*E)/L**3 , 0 , ((2*Iy2+4*Iy1)*E)/L**2 , 0 , 0 , 0 , ((6*Iy2+6*Iy1)*E)/L**3 , 0 , ((4*Iy2+2*Iy1)*E)/L**2 , 0] , \n [0 , 0 , 0 , -((Kv2+Kv1)*G)/(2*L) , 0 , 0 , 0 , 0 , 0 , ((Kv2+Kv1)*G)/(2*L) , 0 , 0] , \n [0 , 0 , -((4*Iy2+2*Iy1)*E)/L**2 , 0 , ((Iy2+Iy1)*E)/L , 0 , 0 , 0 , ((4*Iy2+2*Iy1)*E)/L**2 , 0 , ((3*Iy2+Iy1)*E)/L , 0] , \n [0 , ((4*Iz2+2*Iz1)*E)/L**2 , 0 , 0 , 0 , ((Iz2+Iz1)*E)/L , 0 , -((4*Iz2+2*Iz1)*E)/L**2 , 0 , 0 , 0 , ((3*Iz2+Iz1)*E)/L]\n ])\n # --- Mass matrix\n me = np.array([\n [(me2+3*me1)/12 , 0 , 0 , 0 , 0 , 0 , (me2+me1)/12 , 0 , 0 , 0 , 0 , 0] , \n [0 , (3*me2+10*me1)/35 , 0 , 0 , 0 , (7*L*me2+15*L*me1)/420 , 0 , (9*me2+9*me1)/140 , 0 , 0 , 0 , -(6*L*me2+7*L*me1)/420] , \n [0 , 0 , (3*me2+10*me1)/35 , 0 , -(7*L*me2+15*L*me1)/420 , 0 , 0 , 0 , (9*me2+9*me1)/140 , 0 , (6*L*me2+7*L*me1)/420 , 0] , \n [0 , 0 , 0 , (me2+3*me1)/12 , 0 , 0 , 0 , 0 , 0 , (me2+me1)/12 , 0 , 0] , \n [0 , 0 , -(7*L*me2+15*L*me1)/420 , 0 , (3*L**2*me2+5*L**2*me1)/840 , 0 , 0 , 0 , -(7*L*me2+6*L*me1)/420 , 0 , -(L**2*me2+L**2*me1)/280 , 0] , \n [0 , (7*L*me2+15*L*me1)/420 , 0 , 0 , 0 , (3*L**2*me2+5*L**2*me1)/840 , 0 , (7*L*me2+6*L*me1)/420 , 0 , 0 , 0 , -(L**2*me2+L**2*me1)/280] , \n [(me2+me1)/12 , 0 , 0 , 0 , 0 , 0 , (3*me2+me1)/12 , 0 , 0 , 0 , 0 , 0] , \n [0 , (9*me2+9*me1)/140 , 0 , 0 , 0 , (7*L*me2+6*L*me1)/420 , 0 , (10*me2+3*me1)/35 , 0 , 0 , 0 , -(15*L*me2+7*L*me1)/420] , \n [0 , 0 , (9*me2+9*me1)/140 , 0 , -(7*L*me2+6*L*me1)/420 , 0 , 0 , 0 , (10*me2+3*me1)/35 , 0 , (15*L*me2+7*L*me1)/420 , 0] , \n [0 , 0 , 0 , (me2+me1)/12 , 0 , 0 , 0 , 0 , 0 , (3*me2+me1)/12 , 0 , 0] , \n [0 , 0 , (6*L*me2+7*L*me1)/420 , 0 , -(L**2*me2+L**2*me1)/280 , 0 , 0 , 0 , (15*L*me2+7*L*me1)/420 , 0 , (5*L**2*me2+3*L**2*me1)/840 , 0] , \n [0 , -(6*L*me2+7*L*me1)/420 , 0 , 0 , 0 , -(L**2*me2+L**2*me1)/280 , 0 , -(15*L*me2+7*L*me1)/420 , 0 , 0 , 0 , (5*L**2*me2+3*L**2*me1)/840]\n ])\n\n if (R is not None):\n RR = scipy.linalg.block_diag(R,R,R,R)\n me = np.transpose(RR).dot(me.dot(RR))\n ke = np.transpose(RR).dot(ke.dot(RR))\n\n return ke, me"
] | [
"0.6229205",
"0.6224902",
"0.6176674",
"0.61410236",
"0.61265373",
"0.6025587",
"0.6011592",
"0.59494674",
"0.5935188",
"0.58263266",
"0.581069",
"0.57841295",
"0.5780811",
"0.5777833",
"0.5774311",
"0.5756034",
"0.57487005",
"0.5735444",
"0.5735444",
"0.57328737",
"0.572618",
"0.5721188",
"0.5702022",
"0.5689507",
"0.5685806",
"0.5678754",
"0.56693447",
"0.5638781",
"0.5630382",
"0.5627495"
] | 0.645728 | 0 |
mnasneta1 w.t. 5x5MBconv3 block only | def mnasneta1_5x5mbconv3(pretrained=False, progress=False, **kwargs):
return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],
kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[False, False, False, False, False, False],
dropout=0, pretrained=pretrained, progress=progress, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mnasneta1_5x5mbconv3se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def Unet4(shape, nb_filters=32, exp=1, kernel_size=3, initialization=\"glorot_uniform\", activation=\"relu\", sigma_noise=0, output_channels=1, drop=0.0, regularization=None):\n \n \n input_layer = Input(shape=shape)\n\n conv1 = ConvBlock(input_layer, nb_filters=nb_filters, kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n if drop > 0.0: pool1 = Dropout(drop)(pool1)\n\n conv2 = ConvBlock(pool1, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n if drop > 0.0: pool2 = Dropout(drop)(pool2)\n\n conv3 = ConvBlock(pool2, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n if drop > 0.0: pool3 = Dropout(drop)(pool3)\n\n conv4 = ConvBlock(pool3, nb_filters=nb_filters * 2 **(3 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n if drop > 0.0: pool4 = Dropout(drop)(pool4)\n\n deconv5 = DeconvBlock(conv4, residual=conv3, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv5 = Dropout(drop)(deconv5)\n\n deconv6 = DeconvBlock(deconv5, residual=conv2, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv6 = Dropout(drop)(deconv6)\n\n deconv7 = DeconvBlock(deconv6, residual=conv1, nb_filters=nb_filters, kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv7 = Dropout(drop)(deconv7)\n\n if sigma_noise > 0:\n deconv7 = GaussianNoise(sigma_noise)(deconv7)\n\n output_layer = Conv2D(filters=output_channels, kernel_size=(1, 1))(deconv7)\n output_layer = BatchNormalization()(output_layer)\n output_layer = Activation('softmax')(output_layer)\n\n model = Model(inputs=input_layer, outputs=output_layer, name='Unet')\n return model",
"def mnasneta1_5x5mbconv6(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def mnasneta1_3x3mbconv6se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def inception_block_1a(X):\n\tX_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name ='inception_3a_3x3_conv1')(X)\n\tX_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name = 'inception_3a_3x3_bn1')(X_3x3)\n\tX_3x3 = Activation('relu')(X_3x3)\n\tX_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)\n\tX_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3a_3x3_conv2')(X_3x3)\n\tX_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn2')(X_3x3)\n\tX_3x3 = Activation('relu')(X_3x3)\n\tX_5x5 = Conv2D(16, (1, 1), data_format='channels_first', name='inception_3a_5x5_conv1')(X)\n\tX_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn1')(X_5x5)\n\tX_5x5 = Activation('relu')(X_5x5)\n\tX_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)\n\tX_5x5 = Conv2D(32, (5, 5), data_format='channels_first', name='inception_3a_5x5_conv2')(X_5x5)\n\tX_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn2')(X_5x5)\n\tX_5x5 = Activation('relu')(X_5x5)\n\tX_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)\n\tX_pool = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3a_pool_conv')(X_pool)\n\tX_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_pool_bn')(X_pool)\n\tX_pool = Activation('relu')(X_pool)\n\tX_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool)\n\tX_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3a_1x1_conv')(X)\n\tX_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_1x1_bn')(X_1x1)\n\tX_1x1 = Activation('relu')(X_1x1)\n\t# CONCAT\n\tinception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)\n\treturn inception",
"def mnasneta1_3x3mbconv6(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def inception_block_1a(X):\n\n X_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name='inception_3a_3x3_conv1')(X)\n X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn1')(X_3x3)\n X_3x3 = Activation('relu')(X_3x3)\n X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)\n X_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3a_3x3_conv2')(X_3x3)\n X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn2')(X_3x3)\n X_3x3 = Activation('relu')(X_3x3)\n\n X_5x5 = Conv2D(16, (1, 1), data_format='channels_first', name='inception_3a_5x5_conv1')(X)\n X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn1')(X_5x5)\n X_5x5 = Activation('relu')(X_5x5)\n X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)\n X_5x5 = Conv2D(32, (5, 5), data_format='channels_first', name='inception_3a_5x5_conv2')(X_5x5)\n X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn2')(X_5x5)\n X_5x5 = Activation('relu')(X_5x5)\n\n X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)\n X_pool = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3a_pool_conv')(X_pool)\n X_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_pool_bn')(X_pool)\n X_pool = Activation('relu')(X_pool)\n X_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool)\n\n X_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3a_1x1_conv')(X)\n X_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_1x1_bn')(X_1x1)\n X_1x1 = Activation('relu')(X_1x1)\n\n # CONCAT\n inception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)\n\n return inception",
"def mnasneta1_5x5mbconv6se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def mnasneta1_3x3mbconv3se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def _make_conv_layers_bn_padding(self): ## 20 Convs, used for pretrained by IMAGE Net 1000 class\n conv = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=3, bias=False), # padding=3 so, output is 224.\n nn.BatchNorm2d(64),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n nn.Conv2d(64, 192, 3, padding=1, bias=False),\n nn.BatchNorm2d(192),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(192, 128, 1, bias=False), ## kernel size = 1 이므로 padding = 0(defalut)\n nn.BatchNorm2d(128),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(128, 256, 3, padding=1, bias=False), \n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1, bias=False), \n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 512, 1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(1024, 512, 1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(1024, 512, 1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True)\n )\n return conv",
"def mobilenetv2 (inputs, k, alpha = 1.0, train_bn = False):\n\n x = conv_block(inputs, 32, alpha, (3, 3), strides=(2, 2), block_id=0, train_bn=train_bn) # Input Res: 1\n\n x = inverted_residual_block(x, 16, (3, 3), t=1, strides=1, n=1, alpha=1.0, block_id=1, train_bn=train_bn)\t# Input Res: 1/2\n x = inverted_residual_block(x, 24, (3, 3), t=6, strides=2, n=2, alpha=1.0, block_id=2, train_bn=train_bn)\t# Input Res: 1/2\n x = inverted_residual_block(x, 32, (3, 3), t=6, strides=2, n=3, alpha=1.0, block_id=4, train_bn=train_bn)\t# Input Res: 1/4\n x = inverted_residual_block(x, 64, (3, 3), t=6, strides=2, n=4, alpha=1.0, block_id=7, train_bn=train_bn)\t# Input Res: 1/8\n x = inverted_residual_block(x, 96, (3, 3), t=6, strides=1, n=3, alpha=1.0, block_id=11, train_bn=train_bn)\t# Input Res: 1/8\n x = inverted_residual_block(x, 160, (3, 3), t=6, strides=2, n=3, alpha=1.0, block_id=14, train_bn=train_bn)\t# Input Res: 1/16\n x = inverted_residual_block(x, 320, (3, 3), t=6, strides=1, n=1, alpha=1.0, block_id=17, train_bn=train_bn)\t# Input Res: 1/32\n\n x = conv_block(x, 1280, alpha, (1, 1), strides=(1, 1), block_id=18, train_bn=train_bn) # Input Res: 1/32\n\n x = KL.GlobalAveragePooling2D()(x)\n x = KL.Reshape((1, 1, 1280))(x)\n x = KL.Dropout(0.3, name='Dropout')(x)\n x = KL.Conv2D(k, (1, 1), padding='same')(x)\n\n x = KL.Activation('softmax', name='softmax')(x)\n output = KL.Reshape((k,))(x)\n\n model = KM.Model(inputs, output)\n plot_model(model, to_file='MobileNetv2.png', show_shapes=True)\n\n return model",
"def mnasneta1_3x3mbconv3(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def train(trial_num, image_num, filter_num, filter_size, input_size, channel_num, pooling_rate, left_upper_padding, right_lower_padding):\n\n input_batch_num = 1\n batch_num = 2\n\n init_filters = np.array(np.random.normal(size=filter_num * channel_num *\n filter_size*filter_size), dtype=\"float32\")\n #init_filters = np.array([1.0] * filter_num * channel_num * filter_size * filter_size, dtype=\"float32\")\n init_filters = 0.01 * init_filters.reshape(filter_num, channel_num*filter_size*filter_size)\n\n init_hbias = np.array([-0.1] * filter_num, dtype=\"float32\").reshape(filter_num, 1)\n\n init_vbias = np.array([0.0] * channel_num, dtype=\"float32\").reshape(channel_num, 1)\n\n libnvcrbm = __import__(\"nvcrbm\")\n cur_filters = libnvcrbm.init(filter_num, filter_size, \n input_batch_num, input_size, channel_num,\n pooling_rate, left_upper_padding, right_lower_padding,\n init_filters, init_hbias, init_vbias)\n\n imgs = cPickle.load(open(\"../data/kyoto_large_train.pkl\", \"r\"))\n img_size = imgs[0].shape[0]\n\n for trial_idx in xrange(trial_num):\n for img_idx in xrange(image_num):\n for batch_idx in xrange(batch_num):\n row_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n col_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n #row_idx = np.arange(0, input_size) + 200\n #col_idx = np.arange(0, input_size) + 200\n\n batch_data = imgs[img_idx][row_idx][:,col_idx]\n batch_data = batch_data - batch_data.mean()\n batch_data = np.asarray(batch_data.reshape(1, input_size * input_size), dtype=\"float32\")\n \n libnvcrbm.run_batch(trial_idx, img_idx, batch_idx, batch_data)\n\n libnvcrbm.print_result()\n cur_filters = libnvcrbm.get_gpu_filters()\n dump_filter_image(cur_filters, \"../data/kyoto/filters/trial_%d.png\" % trial_idx)\n\n first_layer = {}\n first_layer[\"filters\"] = cur_filters\n first_layer[\"bias\"] = libnvcrbm.get_gpu_hbias()\n cPickle.dump(first_layer, open(\"../data/first_layer.dat\", \"w+\"))",
"def TBLCCCNN_Model(pan_image_height_size, pan_image_width_size, ms_to_pan_ratio, n_bands, n1_pan, n2_pan, n3_pan, \r\n n1_ms, n2_ms, n3_ms, dropout_rate, n_classes, l_r):\r\n \r\n if (pan_image_height_size % ms_to_pan_ratio) != 0 or (pan_image_width_size % ms_to_pan_ratio) != 0:\r\n raise ValueError('Please make sure that both pan_image_height_size and pan_image_width_size can be divided by {}'.format(int(ms_to_pan_ratio)))\r\n \r\n pan_img_input = Input(shape = (pan_image_height_size, pan_image_width_size, 1))\r\n conv_1_pan = Conv2D(n1_pan, (7, 7), padding = 'same', activation = 'relu')(pan_img_input)\r\n max_pool_1_pan = MaxPooling2D(pool_size = (2, 2))(conv_1_pan)\r\n conv_2_pan = Conv2D(n2_pan, (3, 3), padding = 'same', activation = 'relu')(max_pool_1_pan)\r\n max_pool_2_pan = MaxPooling2D(pool_size = (2, 2))(conv_2_pan)\r\n conv_3_pan = Conv2D(n3_pan, (3, 3), padding = 'same', activation = 'relu')(max_pool_2_pan)\r\n glob_max_pool_pan = GlobalMaxPooling2D()(conv_3_pan)\r\n glob_max_pool_pan = Dropout(dropout_rate)(glob_max_pool_pan)\r\n \r\n ms_img_input = Input(shape = (int(pan_image_height_size / ms_to_pan_ratio), int(pan_image_width_size / ms_to_pan_ratio), \r\n n_bands))\r\n conv_1_ms = Conv2D(n1_ms, (3, 3), padding = 'same', activation = 'relu')(ms_img_input)\r\n conv_2_ms = Conv2D(n2_ms, (3, 3), padding = 'same', activation = 'relu')(conv_1_ms)\r\n conv_3_ms = Conv2D(n3_ms, (3, 3), padding = 'same', activation = 'relu')(conv_2_ms)\r\n glob_max_pool_ms = GlobalMaxPooling2D()(conv_3_ms)\r\n glob_max_pool_ms = Dropout(dropout_rate)(glob_max_pool_ms)\r\n \r\n all_features = concatenate([glob_max_pool_pan, glob_max_pool_ms])\r\n \r\n pred_layer = Dense(n_classes, activation = 'softmax')(all_features)\r\n \r\n tblcccnn_model = Model(inputs = [ms_img_input, pan_img_input], outputs = pred_layer)\r\n tblcccnn_model.compile(loss = 'categorical_crossentropy', optimizer = Adam(lr = l_r), \r\n metrics = ['categorical_crossentropy'])\r\n \r\n return tblcccnn_model",
"def __init__(self, num_in, num_out, g=1, stride=1, d=(1,1),norm=None):\r\n super(MFunit, self).__init__()\r\n num_mid = num_in if num_in <= num_out else num_out\r\n self.conv1x1x1_in1 = Conv3d_Block(num_in,num_in//4,kernel_size=1,stride=1,norm=norm)\r\n self.conv1x1x1_in2 = Conv3d_Block(num_in//4,num_mid,kernel_size=1,stride=1,norm=norm)\r\n self.conv3x3x3_m1 = DilatedConv3DBlock(num_mid,num_out,kernel_size=(3,3,3),stride=stride,g=g,d=(d[0],d[0],d[0]),norm=norm) # dilated\r\n self.conv3x3x3_m2 = DilatedConv3DBlock(num_out,num_out,kernel_size=(3,3,1),stride=1,g=g,d=(d[1],d[1],1),norm=norm)\r\n # self.conv3x3x3_m2 = DilatedConv3DBlock(num_out,num_out,kernel_size=(1,3,3),stride=1,g=g,d=(1,d[1],d[1]),norm=norm)\r\n\r\n # skip connection\r\n if num_in != num_out or stride != 1:\r\n if stride == 1:\r\n self.conv1x1x1_shortcut = Conv3d_Block(num_in, num_out, kernel_size=1, stride=1, padding=0,norm=norm)\r\n if stride == 2:\r\n # if MF block with stride=2, 2x2x2\r\n self.conv2x2x2_shortcut = Conv3d_Block(num_in, num_out, kernel_size=2, stride=2,padding=0, norm=norm) # params\r",
"def __init__(self, ):\n super().__init__()\n channels = 3\n\n # Initial convolution block\n out_features = 64\n # encoder\n self.input = nn.Sequential(\n nn.ReflectionPad2d(channels),\n nn.Conv2d(3, out_features, (7, 7)),\n nn.InstanceNorm2d(out_features),\n nn.MaxPool2d(2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(out_features, out_features * 2, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n nn.MaxPool2d(2),\n nn.Conv2d(out_features * 2, out_features * 4, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n )\n\n self.in0 = nn.InstanceNorm2d(256)\n self.block0 = blocks()\n self.block1 = blocks()\n self.block2 = blocks()\n self.block3 = blocks()\n self.block4 = blocks()\n self.block5 = blocks()\n self.block6 = blocks()\n self.block7 = blocks()\n\n self.out = nn.Sequential(\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 4, out_features * 2, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 2, out_features, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.ReflectionPad2d(channels),\n nn.Conv2d(out_features, channels, 7),\n nn.Tanh(),\n )",
"def fun_n_to_one_small(self, block_index, block_dim, nc1_size):\n nc1 = self.batch_size*self.c1_size\n in_size_w_num = _ceil_div(self.in_size_w, 4)\n with self.tik_instance.if_scope(block_index != block_dim - 1):\n with self.tik_instance.for_range(0, nc1_size) as nc1_index:\n ub_input = self.tik_instance.Tensor(\n \"float32\", (self.in_size_h, self.in_size_w,\n self.c_block_size),\n name=\"ub_input\", scope=tik.scope_ubuf)\n self.tik_instance.data_move(\n ub_input[0], self.grads_gm[(block_index*nc1_size +\n nc1_index) * self.in_size_h *\n self.in_size_w*16],\n 0, 1, self.in_size_h*self.in_size_w*2, 0, 0)\n if in_size_w_num > 1:\n with self.tik_instance.for_range(0, in_size_w_num) \\\n as w_in_index:\n with self.tik_instance.if_scope(\n w_in_index != in_size_w_num - 1):\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n MASK, ub_input[w_in_index*64],\n ub_input[w_in_index*64 + self.in_size_w*16],\n ub_input[w_in_index*64], self.in_size_h-1,\n 1, 1, 1, 0, self.in_size_w*2, 0)\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64], 3,\n 1, 1, 1, 0, 2, 0)\n with self.tik_instance.else_scope():\n if self.in_size_h != 1:\n self.tik_instance.vadd((self.in_size_w - (\n in_size_w_num-1)*4)*16,\n ub_input[w_in_index*64],\n ub_input[w_in_index*64 +\n self.in_size_w *\n 16],\n ub_input[w_in_index*64],\n self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w *\n 2, 0)\n if self.in_size_w-(in_size_w_num-1)*4 > 1:\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64],\n self.in_size_w-(in_size_w_num-1)*4-1,\n 1, 1, 1, 0, 2, 0)\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[64], ub_input[0],\n in_size_w_num-1, 1, 1, 1, 0, 8, 0)\n else:\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n self.in_size_w*16, ub_input[0],\n ub_input[self.in_size_w*16],\n ub_input[0], self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w*2, 0)\n if self.in_size_w != 1:\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[16],\n ub_input[0], self.in_size_w-1,\n 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size+nc1_index)*16],\n ub_input[0], 0, 1, 2, 0, 0)\n with self.tik_instance.else_scope():\n with self.tik_instance.for_range(\n 0, nc1 - (block_dim - 1)*nc1_size) as nc1_index:\n ub_input = self.tik_instance.Tensor(\n \"float32\", (self.in_size_h, self.in_size_w,\n self.c_block_size),\n name=\"ub_output\", scope=tik.scope_ubuf)\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16],\n 0, 1, self.in_size_h*self.in_size_w*2, 0, 0)\n if in_size_w_num > 1:\n with self.tik_instance.for_range(0, in_size_w_num) \\\n as w_in_index:\n with self.tik_instance.if_scope(\n w_in_index != in_size_w_num - 1):\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n MASK, ub_input[w_in_index*64],\n ub_input[w_in_index*64 + self.in_size_w*16],\n ub_input[w_in_index*64], self.in_size_h-1,\n 1, 1, 1, 0, self.in_size_w*2, 0)\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64], 3, 1, 1, 1, 0, 2, 0)\n with self.tik_instance.else_scope():\n if self.in_size_h != 1:\n self.tik_instance.vadd((self.in_size_w-(\n in_size_w_num-1)*4)*16,\n ub_input[w_in_index*64],\n ub_input[w_in_index*64 +\n self.in_size_w *\n 16],\n ub_input[w_in_index*64],\n self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w *\n 2, 0)\n if self.in_size_w-(in_size_w_num-1)*4 > 1:\n 
self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64],\n self.in_size_w-(in_size_w_num-1)*4-1,\n 1, 1, 1, 0, 2, 0)\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[64], ub_input[0],\n in_size_w_num-1, 1, 1, 1, 0, 8, 0)\n else:\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n self.in_size_w*16, ub_input[0],\n ub_input[self.in_size_w*16],\n ub_input[0], self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w*2, 0)\n if self.in_size_w != 1:\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[16], ub_input[0],\n self.in_size_w-1, 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size+nc1_index)*16],\n ub_input[0], 0, 1, 2, 0, 0)",
"def build_bisenet(inputs, num_classes):\n\n ### The spatial path\n ### The number of feature maps for each convolution is not specified in the paper\n ### It was chosen here to be equal to the number of feature maps of a classification\n ### model at each corresponding stage\n # spatial_net = fluid.layers.resize_bilinear(inputs, [Image_Height/8, Image_Width/8])\n # print('spatial_net_1',spatial_net)\n\n ## spatial path\n spatial_net = ConvBlock(inputs, num_filters=64, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=128, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=256, kernel_size=3, stride=2)\n # print(\"spatial_net:\", spatial_net)\n\n # spatial_net = fluid.layers.resize_bilinear(spatial_net, [Image_Height/8, Image_Width/8])\n # print('spatial_net_2',spatial_net)\n ### Context path\n model = ResNet(is_test=False)\n # spatial_net = model.bottleneck_block1(inputs)\n end_points_16, end_points_32 = model.net(inputs)\n net_4 = AttentionRefinementModule(end_points_16, num_filters=512)\n net_5 = AttentionRefinementModule(end_points_32, num_filters=1024)\n global_channels = fluid.layers.reduce_mean(net_5, [2, 3], keep_dim=True)\n net_5_scaled = fluid.layers.elementwise_mul(net_5, global_channels, axis=0)\n\n ### Combining the paths\n net_4 = Upsample(net_4, scale=2)\n net_5_scaled = Upsample(net_5_scaled, scale=4)\n # print('net_4, net_5:', [net_4, net_5_scaled])\n # layers_concat = list()\n # layers_concat.append(spatial_net)\n ## layers_concat.append(net_4)\n # layers_concat.append(net_5_scaled)\n context_net = fluid.layers.concat([spatial_net, net_4, net_5_scaled], axis=1) #\n # context_net = fluid.layers.concat(input=layers_concat,axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([net_4, net_5_scaled], axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([spatial_net,context_net], axis=1)\n # print('context_net2',context_net)\n\n ### FFM\n # net = FeatureFusionModule(input_1=spatial_net, input_2=context_net, num_filters=num_classes)\n net = FeatureFusionModule(inputs=context_net, num_filters=num_classes)\n\n # print('net', net)\n\n ## [batch_zize, num_filters, 128, 64]\n\n ### Final upscaling and finish\n # net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 128])\n # print('conv2d_transpose', net)\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn1')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[128, 256])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn2')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 512])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn3')\n #net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[512, 1024])\n #net = batch_normalization(net, relu=True, name='conv2d_transpose_bn4')\n # print('net',net)\n net = fluid.layers.image_resize(net, out_shape=[512, 1024], resample='BILINEAR')\n\n net = fluid.layers.conv2d(net, num_classes, 1)\n return net",
"def discriminator_block_conditionnal(self, name):\n \n if self.fit_mask : \n \n im = Input(shape=(2+self.nb_classe_mask, self.image_row, self.image_column, self.image_depth), name='dis_input')\n \n else :\n # In:\n im = Input(shape=(2, self.image_row, self.image_column, self.image_depth), name='dis_input')\n\n\n res = Input(shape=(1, self.image_row, self.image_column, self.image_depth), name='dis_input_res')\n\n inputs = Concatenate(axis=-4)([im, res])\n\n # Input 64\n disnet = Conv3D(self.discriminator_kernel * 1, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_1')(inputs)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 1 : 32\n disnet = Conv3D(self.discriminator_kernel * 2, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_2')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 2 : 16\n disnet = Conv3D(self.discriminator_kernel * 4, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_3')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 3 : 8\n disnet = Conv3D(self.discriminator_kernel * 8, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_4')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 4 : 4\n disnet = Conv3D(self.discriminator_kernel * 16, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_5')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n\n \n decision = Conv3D(1, 2, strides=1,\n use_bias=False,\n kernel_initializer='he_normal',\n data_format='channels_first',\n name='dis_decision')(disnet)\n\n decision = Reshape((1,))(decision)\n\n model = Model(inputs=[im, res], outputs=[decision], name=name)\n\n return model",
"def pretrain_bm_net(self, n_epochs=25):\n optimizer = torch.optim.Adam(self.model.bm_net.parameters(), lr = self.lr*5)\n if self.bmloss_type == 'mse':\n criterion = MSELoss()\n elif self.bmloss_type == 'cos':\n criterion = CosineEmbeddingLoss()\n self.model.bm_net.train()\n self.model.bm_net.to(self.device)\n for epoch in range(n_epochs):\n self.model.bm_net.zero_grad()\n optimizer.zero_grad()\n cur_loss = []\n for batch_idx, (uids, feats, _, feats_len) in enumerate(self.model.loader):\n feats = feats.to(self.device).float()\n loss = 0\n out, out_len = self.model.bm_net(feats, feats_len)\n for idx in np.arange(len(out_len)):\n if self.bmloss_type == 'cos':\n # loss += criterion(out[idx, :out_len[idx]-1, :], feats[idx, 1:out_len[idx], :], torch.cuda.LongTensor([1]))\n loss += criterion(out[idx, :out_len[idx]-1, :], feats[idx, 1:out_len[idx], :], torch.LongTensor([1]).to(self.device))\n else:\n loss += criterion(out[idx, :out_len[idx]-1, :], feats[idx, 1:out_len[idx], :])\n # print('--------')\n # print(torch.isnan(out[idx, :out_len[idx]-1, :]).sum(), torch.isnan(feats[idx, :out_len[idx]-1, :]).sum())\n # print(torch.isnan(out).sum(), torch.isnan(feats).sum())\n # print(loss)\n loss.backward()\n cur_loss.append(loss.item())\n nn.utils.clip_grad_norm_(self.model.bm_net.parameters(), 5)\n optimizer.step()\n optimizer.zero_grad()\n self.model.bm_net.zero_grad()\n self.logger.info(f'BM Module pretrain, Epoch {epoch+1}/{n_epochs}: loss {round(np.mean(cur_loss), 8)}')",
"def cspdarknet53_tiny(input_data):\n input_data = common.convolutional(input_data, (3, 3, 3, 32), downsample=True)\n input_data = common.convolutional(input_data, (3, 3, 32, 64), downsample=True)\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 32, 32))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 32, 32))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 32, 64))\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 64, 128))\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 64, 128))\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 128, 256))\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 128, 128))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 128, 128))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 128, 256))\n route_1 = input_data\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 512, 512))\n\n return route_1, input_data",
"def TCN_V5(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 32\n\n config = [ \n [(1,8,32)],\n [(1,8,32)],\n [(1,8,32)],\n [(2,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n #dr = Dropout(dropout)(conv)\n\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model",
"def discriminator_block(in_filters, out_filters):\n layers = [ nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1),\n nn.LeakyReLU(0.01)]\n return layers",
"def ternausnetv1(input_shape=(512, 512, 3), base_depth=64):\n inputs = Input(input_shape)\n conv1 = Conv2D(base_depth, 3, activation='relu', padding='same')(inputs)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n conv2_1 = Conv2D(base_depth*2, 3, activation='relu',\n padding='same')(pool1)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2_1)\n\n conv3_1 = Conv2D(base_depth*4, 3, activation='relu',\n padding='same')(pool2)\n conv3_2 = Conv2D(base_depth*4, 3, activation='relu',\n padding='same')(conv3_1)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3_2)\n\n conv4_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(pool3)\n conv4_2 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(conv4_1)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4_2)\n\n conv5_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(pool4)\n conv5_2 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(conv5_1)\n pool5 = MaxPooling2D(pool_size=(2, 2))(conv5_2)\n\n conv6_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(pool5)\n\n up7 = Conv2DTranspose(base_depth*4, 2, strides=(2, 2), activation='relu',\n padding='same')(conv6_1)\n concat7 = concatenate([up7, conv5_2])\n conv7_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(concat7)\n\n up8 = Conv2DTranspose(base_depth*4, 2, strides=(2, 2), activation='relu',\n padding='same')(conv7_1)\n concat8 = concatenate([up8, conv4_2])\n conv8_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(concat8)\n\n up9 = Conv2DTranspose(base_depth*2, 2, strides=(2, 2), activation='relu',\n padding='same')(conv8_1)\n concat9 = concatenate([up9, conv3_2])\n conv9_1 = Conv2D(base_depth*4, 3, activation='relu',\n padding='same')(concat9)\n\n up10 = Conv2DTranspose(base_depth, 2, strides=(2, 2), activation='relu',\n padding='same')(conv9_1)\n concat10 = concatenate([up10, conv2_1])\n conv10_1 = Conv2D(base_depth*2, 3, activation='relu',\n padding='same')(concat10)\n\n up11 = Conv2DTranspose(int(base_depth/2), 2, strides=(2, 2),\n activation='relu', padding='same')(conv10_1)\n concat11 = concatenate([up11, conv1])\n\n out = Conv2D(1, 1, activation='sigmoid', padding='same')(concat11)\n\n return Model(input=inputs, output=out)",
"def learn_test(data, radio=0, nmk=0, maxif=0, minif=0, num_board=1, verbose=0):\r\n train = data\r\n nmNk = nmk*1024\r\n print('nmk, nmNk', nmk, nmNk)\r\n if verbose >= 1:\r\n print (\"\\n\\n\\n\\n\\ntest_minimal_01\")\r\n\r\n network = cm1k.CM1KEmulator(network_size=num_board * 1024)\r\n assert(not network.euclidean_norm)\r\n\r\n network.write_maxif(maxif)\r\n network.write_minif(minif)\r\n read_neuron_count=[0]\r\n # Train network(RBF Learning)\r\n if radio==1:\r\n\r\n l=len(train)\r\n iteration=0\r\n ID=0\r\n for i in range (0,2):\r\n if ID != l:#&(read_neuron_count[iteration] <= nmNk) :\r\n for input in train:\r\n input_comps = [int(x) for x in input]\r\n context = input_comps[0]\r\n cat = input_comps[1]\r\n pattern = input_comps[2:]\r\n network.learn(pattern, cat, context)\r\n read_neuron_count.append(network.read_ncount())\r\n ID, UNC_c, UNC_i, UNK,total_detail, cla_result, null = classify(train, network, radio=5, spinvalue=3, num_board=1, verbose=1)\r\n iteration+=1\r\n # print(network.read_ncount())\r\n\r\n else :\r\n break\r\n #assert(network.read_ncount() == 3)\r\n # print('iteration', iteration)\r\n # print('maxif', maxif)\r\n # print('minif', minif)\r\n #print('network.register_legend[NSR] : ',network.register_legend['NSR'])\r\n # Write all sampels\r\n elif radio == 9:\r\n iteration = 0\r\n for input in train:\r\n # if verbose >= 1:\r\n # print \"================================================================================\"\r\n input_comps = [int(x) for x in input]\r\n context = input_comps[0]\r\n cat = input_comps[1]\r\n pattern = input_comps[2:]\r\n network.learn_write_all(pattern, cat, context)\r\n # print(network.read_ncount())\r\n #elif radio==2: #Deep RBF\r\n return network, iteration, read_neuron_count",
"def __init__(self):\n super(Encoder3, self).__init__()\n self.lblocks = nn.ModuleList(\n [\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n nn.Sequential(\n nn.MaxPool1d(kernel_size=2, stride=2),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n ),\n ]\n )\n\n self.blocks = nn.ModuleList(\n [\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n nn.Conv1d(128, 128, kernel_size=9, padding=4),\n nn.BatchNorm1d(128),\n nn.ReLU(inplace=True),\n ),\n ]\n )",
"def resnet50_base(freeze_blocks=[1,2,3], weight_regularizer=None, bias_regularizer=None):\n img_input = Input(shape=(None, None, 3))\n bn_axis = 3\n train1 = 1 not in freeze_blocks\n x = Conv2D(64, (7, 7), strides=(2, 2), padding='same', name='conv1', trainable=train1,\n kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)(img_input)\n x = BatchNormalization(axis=bn_axis, name='bn_conv1', trainable=False)(x, training=False)\n x = Activation('relu')(x)\n x = MaxPooling2D((3, 3), strides=(2, 2))(x)\n\n train2 = 2 not in freeze_blocks\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n\n train3 = 3 not in freeze_blocks\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n\n train4 = 4 not in freeze_blocks\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n\n base_model = Model(img_input, x, name='resnet50')\n\n return base_model",
"def __init__(self):\n super(Decoder_1m, self).__init__()\n self.lconvtwos = nn.ModuleList(\n [\n nn.Sequential(\n nn.Dropout(p=0.1),\n nn.Conv2d(128, 32, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, 
kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n ]\n )\n\n self.convtwos = nn.ModuleList(\n [\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), 
padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n ]\n )\n self.final = nn.Sequential(\n nn.Conv2d(64, 5, kernel_size=(1, 1), padding=0),\n nn.BatchNorm2d(5),\n nn.ReLU(inplace=True),\n nn.Conv2d(5, 1, kernel_size=(1, 1), padding=0),\n )",
"def block_inception_c(blk, net):\n # By default use stride=1 and SAME padding\n s = net.add(Split('%s/Split' % blk, 4))\n br0 = conv2d(net, '%s/Branch_0/Conv2d_0a_1x1' % blk, 256, 1, src=s)\n\n br1 = conv2d(net, '%s/Branch_1/Conv2d_0a_1x1' % blk, 384, 1, src=s)\n br1 = net.add(Split('%s/Branch_1/Split' % blk, 2))\n br10 = conv2d(net, '%s/Branch_1/Conv2d_0b_1x3' % blk, 256, (1, 3), src=br1)\n br11 = conv2d(net, '%s/Branch_1/Conv2d_0c_3x1' % blk, 256, (3, 1), src=br1)\n br1 = net.add(Concat('%s/Branch_1/Concat' % blk, 1), [br10, br11])\n\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0a_1x1' % blk, 384, 1, src=s)\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0b_3x1' % blk, 448, (3, 1))\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0c_1x3' % blk, 512, (1, 3))\n br2 = net.add(Split('%s/Branch_2/Split' % blk, 2))\n br20 = conv2d(net, '%s/Branch_2/Conv2d_0d_1x3' % blk, 256, (1, 3), src=br2)\n br21 = conv2d(net, '%s/Branch_2/Conv2d_0e_3x1' % blk, 256, (3, 1), src=br2)\n br2 = net.add(Concat('%s/Branch_2/Concat' % blk, 1), [br20, br21])\n\n br3 = net.add(AvgPooling2D('%s/Branch_3/AvgPool_0a_3x3' % blk, 3, 1), s)\n br3 = conv2d(net, '%s/Branch_3/Conv2d_0b_1x1' % blk, 256, 1)\n return net.add(Concat('%s/Concat' % blk, 1), [br0, br1, br2, br3])",
"def __init__(self, block, layers, groups, reduction, dropout_p=0.2, in_channels=3,\n inplanes=128, input_3x3=True, downsample_kernel_size=3,\n downsample_padding=1, num_classes=1000):\n super(SENet, self).__init__()\n \n self.in_channels = in_channels\n self.inplanes = inplanes\n\n if input_3x3:\n layer0_modules = [\n ('conv1', nn.Conv2d(in_channels, 64, 3, stride=2, padding=1,\n bias=False)),\n ('bn1', nn.BatchNorm2d(64)),\n ('relu1', nn.ReLU(inplace=True)),\n ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1,\n bias=False)),\n ('bn2', nn.BatchNorm2d(64)),\n ('relu2', nn.ReLU(inplace=True)),\n ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1,\n bias=False)),\n ('bn3', nn.BatchNorm2d(inplanes)),\n ('relu3', nn.ReLU(inplace=True)),\n ]\n else:\n layer0_modules = [\n ('conv1', nn.Conv2d(in_channels, inplanes, kernel_size=7, stride=2,\n padding=3, bias=False)),\n ('bn1', nn.BatchNorm2d(inplanes)),\n ('relu1', nn.ReLU(inplace=True)),\n ]\n\n layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2, ceil_mode=True)))\n \n self.layer0 = nn.Sequential(OrderedDict(layer0_modules))\n \n self.layer1 = self._make_layer(\n block,\n planes=64,\n blocks=layers[0],\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=1,\n downsample_padding=0\n )\n \n self.layer2 = self._make_layer(\n block,\n planes=128,\n blocks=layers[1],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.layer3 = self._make_layer(\n block,\n planes=256,\n blocks=layers[2],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.layer4 = self._make_layer(\n block,\n planes=512,\n blocks=layers[3],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.avg_pool = nn.AvgPool2d(7, stride=1)\n self.dropout = nn.Dropout2d(dropout_p) if dropout_p is not None else None\n self.last_linear = nn.Linear(512 * block.expansion, num_classes)"
] | [
"0.6580992",
"0.6524809",
"0.64358896",
"0.6420319",
"0.639647",
"0.6387695",
"0.6360615",
"0.63517386",
"0.62656593",
"0.61908257",
"0.6137714",
"0.61336964",
"0.61318547",
"0.6123348",
"0.61172646",
"0.60345227",
"0.6031906",
"0.6017946",
"0.6014714",
"0.6014486",
"0.60038435",
"0.5993517",
"0.5991082",
"0.59611595",
"0.5929779",
"0.59288627",
"0.5927978",
"0.5925654",
"0.5925368",
"0.5914999"
] | 0.66056234 | 0 |
mnasneta1 w.t. 5x5MBconv3SE block only | def mnasneta1_5x5mbconv3se(pretrained=False, progress=False, **kwargs):
return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],
kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[True, True, True, True, True, True],
dropout=0, pretrained=pretrained, progress=progress, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Unet4(shape, nb_filters=32, exp=1, kernel_size=3, initialization=\"glorot_uniform\", activation=\"relu\", sigma_noise=0, output_channels=1, drop=0.0, regularization=None):\n \n \n input_layer = Input(shape=shape)\n\n conv1 = ConvBlock(input_layer, nb_filters=nb_filters, kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n if drop > 0.0: pool1 = Dropout(drop)(pool1)\n\n conv2 = ConvBlock(pool1, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n if drop > 0.0: pool2 = Dropout(drop)(pool2)\n\n conv3 = ConvBlock(pool2, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n if drop > 0.0: pool3 = Dropout(drop)(pool3)\n\n conv4 = ConvBlock(pool3, nb_filters=nb_filters * 2 **(3 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n if drop > 0.0: pool4 = Dropout(drop)(pool4)\n\n deconv5 = DeconvBlock(conv4, residual=conv3, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv5 = Dropout(drop)(deconv5)\n\n deconv6 = DeconvBlock(deconv5, residual=conv2, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv6 = Dropout(drop)(deconv6)\n\n deconv7 = DeconvBlock(deconv6, residual=conv1, nb_filters=nb_filters, kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv7 = Dropout(drop)(deconv7)\n\n if sigma_noise > 0:\n deconv7 = GaussianNoise(sigma_noise)(deconv7)\n\n output_layer = Conv2D(filters=output_channels, kernel_size=(1, 1))(deconv7)\n output_layer = BatchNormalization()(output_layer)\n output_layer = Activation('softmax')(output_layer)\n\n model = Model(inputs=input_layer, outputs=output_layer, name='Unet')\n return model",
"def mnasneta1_5x5mbconv3(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def mnasneta1_3x3mbconv6se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def inception_block_1a(X):\n\tX_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name ='inception_3a_3x3_conv1')(X)\n\tX_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name = 'inception_3a_3x3_bn1')(X_3x3)\n\tX_3x3 = Activation('relu')(X_3x3)\n\tX_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)\n\tX_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3a_3x3_conv2')(X_3x3)\n\tX_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn2')(X_3x3)\n\tX_3x3 = Activation('relu')(X_3x3)\n\tX_5x5 = Conv2D(16, (1, 1), data_format='channels_first', name='inception_3a_5x5_conv1')(X)\n\tX_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn1')(X_5x5)\n\tX_5x5 = Activation('relu')(X_5x5)\n\tX_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)\n\tX_5x5 = Conv2D(32, (5, 5), data_format='channels_first', name='inception_3a_5x5_conv2')(X_5x5)\n\tX_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn2')(X_5x5)\n\tX_5x5 = Activation('relu')(X_5x5)\n\tX_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)\n\tX_pool = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3a_pool_conv')(X_pool)\n\tX_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_pool_bn')(X_pool)\n\tX_pool = Activation('relu')(X_pool)\n\tX_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool)\n\tX_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3a_1x1_conv')(X)\n\tX_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_1x1_bn')(X_1x1)\n\tX_1x1 = Activation('relu')(X_1x1)\n\t# CONCAT\n\tinception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)\n\treturn inception",
"def mnasneta1_3x3mbconv3se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def inception_block_1a(X):\n\n X_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name='inception_3a_3x3_conv1')(X)\n X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn1')(X_3x3)\n X_3x3 = Activation('relu')(X_3x3)\n X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)\n X_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3a_3x3_conv2')(X_3x3)\n X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn2')(X_3x3)\n X_3x3 = Activation('relu')(X_3x3)\n\n X_5x5 = Conv2D(16, (1, 1), data_format='channels_first', name='inception_3a_5x5_conv1')(X)\n X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn1')(X_5x5)\n X_5x5 = Activation('relu')(X_5x5)\n X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)\n X_5x5 = Conv2D(32, (5, 5), data_format='channels_first', name='inception_3a_5x5_conv2')(X_5x5)\n X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn2')(X_5x5)\n X_5x5 = Activation('relu')(X_5x5)\n\n X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)\n X_pool = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3a_pool_conv')(X_pool)\n X_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_pool_bn')(X_pool)\n X_pool = Activation('relu')(X_pool)\n X_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool)\n\n X_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3a_1x1_conv')(X)\n X_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_1x1_bn')(X_1x1)\n X_1x1 = Activation('relu')(X_1x1)\n\n # CONCAT\n inception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)\n\n return inception",
"def mnasneta1_5x5mbconv6se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def mnasneta1_5x5mbconv6(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def mnasneta1_3x3mbconv6(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def mobilenetv2 (inputs, k, alpha = 1.0, train_bn = False):\n\n x = conv_block(inputs, 32, alpha, (3, 3), strides=(2, 2), block_id=0, train_bn=train_bn) # Input Res: 1\n\n x = inverted_residual_block(x, 16, (3, 3), t=1, strides=1, n=1, alpha=1.0, block_id=1, train_bn=train_bn)\t# Input Res: 1/2\n x = inverted_residual_block(x, 24, (3, 3), t=6, strides=2, n=2, alpha=1.0, block_id=2, train_bn=train_bn)\t# Input Res: 1/2\n x = inverted_residual_block(x, 32, (3, 3), t=6, strides=2, n=3, alpha=1.0, block_id=4, train_bn=train_bn)\t# Input Res: 1/4\n x = inverted_residual_block(x, 64, (3, 3), t=6, strides=2, n=4, alpha=1.0, block_id=7, train_bn=train_bn)\t# Input Res: 1/8\n x = inverted_residual_block(x, 96, (3, 3), t=6, strides=1, n=3, alpha=1.0, block_id=11, train_bn=train_bn)\t# Input Res: 1/8\n x = inverted_residual_block(x, 160, (3, 3), t=6, strides=2, n=3, alpha=1.0, block_id=14, train_bn=train_bn)\t# Input Res: 1/16\n x = inverted_residual_block(x, 320, (3, 3), t=6, strides=1, n=1, alpha=1.0, block_id=17, train_bn=train_bn)\t# Input Res: 1/32\n\n x = conv_block(x, 1280, alpha, (1, 1), strides=(1, 1), block_id=18, train_bn=train_bn) # Input Res: 1/32\n\n x = KL.GlobalAveragePooling2D()(x)\n x = KL.Reshape((1, 1, 1280))(x)\n x = KL.Dropout(0.3, name='Dropout')(x)\n x = KL.Conv2D(k, (1, 1), padding='same')(x)\n\n x = KL.Activation('softmax', name='softmax')(x)\n output = KL.Reshape((k,))(x)\n\n model = KM.Model(inputs, output)\n plot_model(model, to_file='MobileNetv2.png', show_shapes=True)\n\n return model",
"def test_se_block(self):\n images = tf.zeros((10, 128, 128, 3), dtype=tf.float32)\n global_params = efficientnet_model.GlobalParams(\n 1.0,\n 1.0,\n 0,\n 'channels_last',\n num_classes=10,\n batch_norm=utils.TpuBatchNormalization)\n blocks_args = [\n efficientnet_model.BlockArgs(\n kernel_size=3,\n num_repeat=3,\n input_filters=3,\n output_filters=6,\n expand_ratio=6,\n id_skip=False,\n strides=[2, 2],\n se_ratio=0.8,\n conv_type=0,\n fused_conv=0,\n super_pixel=0)\n ]\n model = efficientnet_model.Model(blocks_args, global_params)\n outputs = model(images, training=True)\n self.assertEqual((10, 10), outputs[0].shape)",
"def pretrain_bm_net(self, n_epochs=25):\n optimizer = torch.optim.Adam(self.model.bm_net.parameters(), lr = self.lr*5)\n if self.bmloss_type == 'mse':\n criterion = MSELoss()\n elif self.bmloss_type == 'cos':\n criterion = CosineEmbeddingLoss()\n self.model.bm_net.train()\n self.model.bm_net.to(self.device)\n for epoch in range(n_epochs):\n self.model.bm_net.zero_grad()\n optimizer.zero_grad()\n cur_loss = []\n for batch_idx, (uids, feats, _, feats_len) in enumerate(self.model.loader):\n feats = feats.to(self.device).float()\n loss = 0\n out, out_len = self.model.bm_net(feats, feats_len)\n for idx in np.arange(len(out_len)):\n if self.bmloss_type == 'cos':\n # loss += criterion(out[idx, :out_len[idx]-1, :], feats[idx, 1:out_len[idx], :], torch.cuda.LongTensor([1]))\n loss += criterion(out[idx, :out_len[idx]-1, :], feats[idx, 1:out_len[idx], :], torch.LongTensor([1]).to(self.device))\n else:\n loss += criterion(out[idx, :out_len[idx]-1, :], feats[idx, 1:out_len[idx], :])\n # print('--------')\n # print(torch.isnan(out[idx, :out_len[idx]-1, :]).sum(), torch.isnan(feats[idx, :out_len[idx]-1, :]).sum())\n # print(torch.isnan(out).sum(), torch.isnan(feats).sum())\n # print(loss)\n loss.backward()\n cur_loss.append(loss.item())\n nn.utils.clip_grad_norm_(self.model.bm_net.parameters(), 5)\n optimizer.step()\n optimizer.zero_grad()\n self.model.bm_net.zero_grad()\n self.logger.info(f'BM Module pretrain, Epoch {epoch+1}/{n_epochs}: loss {round(np.mean(cur_loss), 8)}')",
"def __init__(self, block, layers, groups, reduction, dropout_p=0.2, in_channels=3,\n inplanes=128, input_3x3=True, downsample_kernel_size=3,\n downsample_padding=1, num_classes=1000):\n super(SENet, self).__init__()\n \n self.in_channels = in_channels\n self.inplanes = inplanes\n\n if input_3x3:\n layer0_modules = [\n ('conv1', nn.Conv2d(in_channels, 64, 3, stride=2, padding=1,\n bias=False)),\n ('bn1', nn.BatchNorm2d(64)),\n ('relu1', nn.ReLU(inplace=True)),\n ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1,\n bias=False)),\n ('bn2', nn.BatchNorm2d(64)),\n ('relu2', nn.ReLU(inplace=True)),\n ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1,\n bias=False)),\n ('bn3', nn.BatchNorm2d(inplanes)),\n ('relu3', nn.ReLU(inplace=True)),\n ]\n else:\n layer0_modules = [\n ('conv1', nn.Conv2d(in_channels, inplanes, kernel_size=7, stride=2,\n padding=3, bias=False)),\n ('bn1', nn.BatchNorm2d(inplanes)),\n ('relu1', nn.ReLU(inplace=True)),\n ]\n\n layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2, ceil_mode=True)))\n \n self.layer0 = nn.Sequential(OrderedDict(layer0_modules))\n \n self.layer1 = self._make_layer(\n block,\n planes=64,\n blocks=layers[0],\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=1,\n downsample_padding=0\n )\n \n self.layer2 = self._make_layer(\n block,\n planes=128,\n blocks=layers[1],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.layer3 = self._make_layer(\n block,\n planes=256,\n blocks=layers[2],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.layer4 = self._make_layer(\n block,\n planes=512,\n blocks=layers[3],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.avg_pool = nn.AvgPool2d(7, stride=1)\n self.dropout = nn.Dropout2d(dropout_p) if dropout_p is not None else None\n self.last_linear = nn.Linear(512 * block.expansion, num_classes)",
"def train(trial_num, image_num, filter_num, filter_size, input_size, channel_num, pooling_rate, left_upper_padding, right_lower_padding):\n\n input_batch_num = 1\n batch_num = 2\n\n init_filters = np.array(np.random.normal(size=filter_num * channel_num *\n filter_size*filter_size), dtype=\"float32\")\n #init_filters = np.array([1.0] * filter_num * channel_num * filter_size * filter_size, dtype=\"float32\")\n init_filters = 0.01 * init_filters.reshape(filter_num, channel_num*filter_size*filter_size)\n\n init_hbias = np.array([-0.1] * filter_num, dtype=\"float32\").reshape(filter_num, 1)\n\n init_vbias = np.array([0.0] * channel_num, dtype=\"float32\").reshape(channel_num, 1)\n\n libnvcrbm = __import__(\"nvcrbm\")\n cur_filters = libnvcrbm.init(filter_num, filter_size, \n input_batch_num, input_size, channel_num,\n pooling_rate, left_upper_padding, right_lower_padding,\n init_filters, init_hbias, init_vbias)\n\n imgs = cPickle.load(open(\"../data/kyoto_large_train.pkl\", \"r\"))\n img_size = imgs[0].shape[0]\n\n for trial_idx in xrange(trial_num):\n for img_idx in xrange(image_num):\n for batch_idx in xrange(batch_num):\n row_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n col_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n #row_idx = np.arange(0, input_size) + 200\n #col_idx = np.arange(0, input_size) + 200\n\n batch_data = imgs[img_idx][row_idx][:,col_idx]\n batch_data = batch_data - batch_data.mean()\n batch_data = np.asarray(batch_data.reshape(1, input_size * input_size), dtype=\"float32\")\n \n libnvcrbm.run_batch(trial_idx, img_idx, batch_idx, batch_data)\n\n libnvcrbm.print_result()\n cur_filters = libnvcrbm.get_gpu_filters()\n dump_filter_image(cur_filters, \"../data/kyoto/filters/trial_%d.png\" % trial_idx)\n\n first_layer = {}\n first_layer[\"filters\"] = cur_filters\n first_layer[\"bias\"] = libnvcrbm.get_gpu_hbias()\n cPickle.dump(first_layer, open(\"../data/first_layer.dat\", \"w+\"))",
"def block_inception_c(blk, net):\n # By default use stride=1 and SAME padding\n s = net.add(Split('%s/Split' % blk, 4))\n br0 = conv2d(net, '%s/Branch_0/Conv2d_0a_1x1' % blk, 256, 1, src=s)\n\n br1 = conv2d(net, '%s/Branch_1/Conv2d_0a_1x1' % blk, 384, 1, src=s)\n br1 = net.add(Split('%s/Branch_1/Split' % blk, 2))\n br10 = conv2d(net, '%s/Branch_1/Conv2d_0b_1x3' % blk, 256, (1, 3), src=br1)\n br11 = conv2d(net, '%s/Branch_1/Conv2d_0c_3x1' % blk, 256, (3, 1), src=br1)\n br1 = net.add(Concat('%s/Branch_1/Concat' % blk, 1), [br10, br11])\n\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0a_1x1' % blk, 384, 1, src=s)\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0b_3x1' % blk, 448, (3, 1))\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0c_1x3' % blk, 512, (1, 3))\n br2 = net.add(Split('%s/Branch_2/Split' % blk, 2))\n br20 = conv2d(net, '%s/Branch_2/Conv2d_0d_1x3' % blk, 256, (1, 3), src=br2)\n br21 = conv2d(net, '%s/Branch_2/Conv2d_0e_3x1' % blk, 256, (3, 1), src=br2)\n br2 = net.add(Concat('%s/Branch_2/Concat' % blk, 1), [br20, br21])\n\n br3 = net.add(AvgPooling2D('%s/Branch_3/AvgPool_0a_3x3' % blk, 3, 1), s)\n br3 = conv2d(net, '%s/Branch_3/Conv2d_0b_1x1' % blk, 256, 1)\n return net.add(Concat('%s/Concat' % blk, 1), [br0, br1, br2, br3])",
"def TBLCCCNN_Model(pan_image_height_size, pan_image_width_size, ms_to_pan_ratio, n_bands, n1_pan, n2_pan, n3_pan, \r\n n1_ms, n2_ms, n3_ms, dropout_rate, n_classes, l_r):\r\n \r\n if (pan_image_height_size % ms_to_pan_ratio) != 0 or (pan_image_width_size % ms_to_pan_ratio) != 0:\r\n raise ValueError('Please make sure that both pan_image_height_size and pan_image_width_size can be divided by {}'.format(int(ms_to_pan_ratio)))\r\n \r\n pan_img_input = Input(shape = (pan_image_height_size, pan_image_width_size, 1))\r\n conv_1_pan = Conv2D(n1_pan, (7, 7), padding = 'same', activation = 'relu')(pan_img_input)\r\n max_pool_1_pan = MaxPooling2D(pool_size = (2, 2))(conv_1_pan)\r\n conv_2_pan = Conv2D(n2_pan, (3, 3), padding = 'same', activation = 'relu')(max_pool_1_pan)\r\n max_pool_2_pan = MaxPooling2D(pool_size = (2, 2))(conv_2_pan)\r\n conv_3_pan = Conv2D(n3_pan, (3, 3), padding = 'same', activation = 'relu')(max_pool_2_pan)\r\n glob_max_pool_pan = GlobalMaxPooling2D()(conv_3_pan)\r\n glob_max_pool_pan = Dropout(dropout_rate)(glob_max_pool_pan)\r\n \r\n ms_img_input = Input(shape = (int(pan_image_height_size / ms_to_pan_ratio), int(pan_image_width_size / ms_to_pan_ratio), \r\n n_bands))\r\n conv_1_ms = Conv2D(n1_ms, (3, 3), padding = 'same', activation = 'relu')(ms_img_input)\r\n conv_2_ms = Conv2D(n2_ms, (3, 3), padding = 'same', activation = 'relu')(conv_1_ms)\r\n conv_3_ms = Conv2D(n3_ms, (3, 3), padding = 'same', activation = 'relu')(conv_2_ms)\r\n glob_max_pool_ms = GlobalMaxPooling2D()(conv_3_ms)\r\n glob_max_pool_ms = Dropout(dropout_rate)(glob_max_pool_ms)\r\n \r\n all_features = concatenate([glob_max_pool_pan, glob_max_pool_ms])\r\n \r\n pred_layer = Dense(n_classes, activation = 'softmax')(all_features)\r\n \r\n tblcccnn_model = Model(inputs = [ms_img_input, pan_img_input], outputs = pred_layer)\r\n tblcccnn_model.compile(loss = 'categorical_crossentropy', optimizer = Adam(lr = l_r), \r\n metrics = ['categorical_crossentropy'])\r\n \r\n return tblcccnn_model",
"def mnasneta1_3x3mbconv3(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def build_bisenet(inputs, num_classes):\n\n ### The spatial path\n ### The number of feature maps for each convolution is not specified in the paper\n ### It was chosen here to be equal to the number of feature maps of a classification\n ### model at each corresponding stage\n # spatial_net = fluid.layers.resize_bilinear(inputs, [Image_Height/8, Image_Width/8])\n # print('spatial_net_1',spatial_net)\n\n ## spatial path\n spatial_net = ConvBlock(inputs, num_filters=64, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=128, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=256, kernel_size=3, stride=2)\n # print(\"spatial_net:\", spatial_net)\n\n # spatial_net = fluid.layers.resize_bilinear(spatial_net, [Image_Height/8, Image_Width/8])\n # print('spatial_net_2',spatial_net)\n ### Context path\n model = ResNet(is_test=False)\n # spatial_net = model.bottleneck_block1(inputs)\n end_points_16, end_points_32 = model.net(inputs)\n net_4 = AttentionRefinementModule(end_points_16, num_filters=512)\n net_5 = AttentionRefinementModule(end_points_32, num_filters=1024)\n global_channels = fluid.layers.reduce_mean(net_5, [2, 3], keep_dim=True)\n net_5_scaled = fluid.layers.elementwise_mul(net_5, global_channels, axis=0)\n\n ### Combining the paths\n net_4 = Upsample(net_4, scale=2)\n net_5_scaled = Upsample(net_5_scaled, scale=4)\n # print('net_4, net_5:', [net_4, net_5_scaled])\n # layers_concat = list()\n # layers_concat.append(spatial_net)\n ## layers_concat.append(net_4)\n # layers_concat.append(net_5_scaled)\n context_net = fluid.layers.concat([spatial_net, net_4, net_5_scaled], axis=1) #\n # context_net = fluid.layers.concat(input=layers_concat,axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([net_4, net_5_scaled], axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([spatial_net,context_net], axis=1)\n # print('context_net2',context_net)\n\n ### FFM\n # net = FeatureFusionModule(input_1=spatial_net, input_2=context_net, num_filters=num_classes)\n net = FeatureFusionModule(inputs=context_net, num_filters=num_classes)\n\n # print('net', net)\n\n ## [batch_zize, num_filters, 128, 64]\n\n ### Final upscaling and finish\n # net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 128])\n # print('conv2d_transpose', net)\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn1')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[128, 256])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn2')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 512])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn3')\n #net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[512, 1024])\n #net = batch_normalization(net, relu=True, name='conv2d_transpose_bn4')\n # print('net',net)\n net = fluid.layers.image_resize(net, out_shape=[512, 1024], resample='BILINEAR')\n\n net = fluid.layers.conv2d(net, num_classes, 1)\n return net",
"def __init__(self, num_in, num_out, g=1, stride=1, d=(1,1),norm=None):\r\n super(MFunit, self).__init__()\r\n num_mid = num_in if num_in <= num_out else num_out\r\n self.conv1x1x1_in1 = Conv3d_Block(num_in,num_in//4,kernel_size=1,stride=1,norm=norm)\r\n self.conv1x1x1_in2 = Conv3d_Block(num_in//4,num_mid,kernel_size=1,stride=1,norm=norm)\r\n self.conv3x3x3_m1 = DilatedConv3DBlock(num_mid,num_out,kernel_size=(3,3,3),stride=stride,g=g,d=(d[0],d[0],d[0]),norm=norm) # dilated\r\n self.conv3x3x3_m2 = DilatedConv3DBlock(num_out,num_out,kernel_size=(3,3,1),stride=1,g=g,d=(d[1],d[1],1),norm=norm)\r\n # self.conv3x3x3_m2 = DilatedConv3DBlock(num_out,num_out,kernel_size=(1,3,3),stride=1,g=g,d=(1,d[1],d[1]),norm=norm)\r\n\r\n # skip connection\r\n if num_in != num_out or stride != 1:\r\n if stride == 1:\r\n self.conv1x1x1_shortcut = Conv3d_Block(num_in, num_out, kernel_size=1, stride=1, padding=0,norm=norm)\r\n if stride == 2:\r\n # if MF block with stride=2, 2x2x2\r\n self.conv2x2x2_shortcut = Conv3d_Block(num_in, num_out, kernel_size=2, stride=2,padding=0, norm=norm) # params\r",
"def construct_model():\n import lbann\n\n # Layer graph\n input = lbann.Input(target_mode='N/A', name='inp_data')\n # data is 64*64*4 images + 15 scalar + 5 param\n #inp_slice = lbann.Slice(input, axis=0, slice_points=\"0 16399 16404\",name='inp_slice')\n inp_slice = lbann.Slice(input, axis=0, slice_points=str_list([0,args.ydim,args.ydim+5]),name='inp_slice')\n gt_y = lbann.Identity(inp_slice,name='gt_y')\n gt_x = lbann.Identity(inp_slice, name='gt_x') #param not used\n\n zero = lbann.Constant(value=0.0,num_neurons='1',name='zero')\n one = lbann.Constant(value=1.0,num_neurons='1',name='one')\n\n z_dim = 20 #Latent space dim\n\n z = lbann.Gaussian(mean=0.0,stdev=1.0, neuron_dims=\"20\")\n model = macc_models.MACCWAE(args.zdim,args.ydim,cf=args.mcf,use_CNN=args.useCNN)\n d1_real, d1_fake, d_adv, pred_y = model(z,gt_y)\n\n d1_real_bce = lbann.SigmoidBinaryCrossEntropy([d1_real,one],name='d1_real_bce')\n d1_fake_bce = lbann.SigmoidBinaryCrossEntropy([d1_fake,zero],name='d1_fake_bce')\n d_adv_bce = lbann.SigmoidBinaryCrossEntropy([d_adv,one],name='d_adv_bce')\n img_loss = lbann.MeanSquaredError([pred_y,gt_y])\n rec_error = lbann.L2Norm2(lbann.WeightedSum([pred_y,gt_y], scaling_factors=\"1 -1\"))\n\n layers = list(lbann.traverse_layer_graph(input))\n # Setup objective function\n weights = set()\n src_layers = []\n dst_layers = []\n for l in layers:\n if(l.weights and \"disc0\" in l.name and \"instance1\" in l.name):\n src_layers.append(l.name)\n #freeze weights in disc2\n if(l.weights and \"disc1\" in l.name):\n dst_layers.append(l.name)\n for idx in range(len(l.weights)):\n l.weights[idx].optimizer = lbann.NoOptimizer()\n weights.update(l.weights)\n l2_reg = lbann.L2WeightRegularization(weights=weights, scale=1e-4)\n d_adv_bce = lbann.LayerTerm(d_adv_bce,scale=0.01)\n obj = lbann.ObjectiveFunction([d1_real_bce,d1_fake_bce,d_adv_bce,img_loss,rec_error,l2_reg])\n # Initialize check metric callback\n metrics = [lbann.Metric(img_loss, name='recon_error')]\n #pred_y = macc_models.MACCWAE.pred_y_name\n callbacks = [lbann.CallbackPrint(),\n lbann.CallbackTimer(),\n lbann.CallbackSaveModel(dir=args.dump_models),\n lbann.CallbackReplaceWeights(source_layers=list2str(src_layers),\n destination_layers=list2str(dst_layers),\n batch_interval=2)]\n\n if(args.ltfb_batch_interval > 0) :\n callbacks.append(lbann.CallbackLTFB(batch_interval=args.ltfb_batch_interval,metric='recon_error',\n low_score_wins=True,\n exchange_hyperparameters=True))\n\n # Construct model\n return lbann.Model(args.num_epochs,\n serialize_io=True,\n weights=weights,\n layers=layers,\n metrics=metrics,\n objective_function=obj,\n callbacks=callbacks)",
"def discriminator_block_conditionnal(self, name):\n \n if self.fit_mask : \n \n im = Input(shape=(2+self.nb_classe_mask, self.image_row, self.image_column, self.image_depth), name='dis_input')\n \n else :\n # In:\n im = Input(shape=(2, self.image_row, self.image_column, self.image_depth), name='dis_input')\n\n\n res = Input(shape=(1, self.image_row, self.image_column, self.image_depth), name='dis_input_res')\n\n inputs = Concatenate(axis=-4)([im, res])\n\n # Input 64\n disnet = Conv3D(self.discriminator_kernel * 1, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_1')(inputs)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 1 : 32\n disnet = Conv3D(self.discriminator_kernel * 2, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_2')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 2 : 16\n disnet = Conv3D(self.discriminator_kernel * 4, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_3')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 3 : 8\n disnet = Conv3D(self.discriminator_kernel * 8, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_4')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 4 : 4\n disnet = Conv3D(self.discriminator_kernel * 16, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_5')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n\n \n decision = Conv3D(1, 2, strides=1,\n use_bias=False,\n kernel_initializer='he_normal',\n data_format='channels_first',\n name='dis_decision')(disnet)\n\n decision = Reshape((1,))(decision)\n\n model = Model(inputs=[im, res], outputs=[decision], name=name)\n\n return model",
"def resnet50_base(freeze_blocks=[1,2,3], weight_regularizer=None, bias_regularizer=None):\n img_input = Input(shape=(None, None, 3))\n bn_axis = 3\n train1 = 1 not in freeze_blocks\n x = Conv2D(64, (7, 7), strides=(2, 2), padding='same', name='conv1', trainable=train1,\n kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)(img_input)\n x = BatchNormalization(axis=bn_axis, name='bn_conv1', trainable=False)(x, training=False)\n x = Activation('relu')(x)\n x = MaxPooling2D((3, 3), strides=(2, 2))(x)\n\n train2 = 2 not in freeze_blocks\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n\n train3 = 3 not in freeze_blocks\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n\n train4 = 4 not in freeze_blocks\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n\n base_model = Model(img_input, x, name='resnet50')\n\n return base_model",
"def inception_block(A_prev, filters):\n F1, F3R, F3, F5R, F5, FPP = filters\n layer_1 = K.layers.Conv2D(filters=F1,\n kernel_size=(1, 1),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_1 = layer_1(A_prev)\n layer_2 = K.layers.Conv2D(filters=F3R,\n kernel_size=(1, 1),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_2 = layer_2(A_prev)\n layer_3 = K.layers.Conv2D(filters=F3,\n kernel_size=(3, 3),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_3 = layer_3(output_2)\n layer_4 = K.layers.Conv2D(filters=F5R,\n kernel_size=(1, 1),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_4 = layer_4(A_prev)\n layer_5 = K.layers.Conv2D(filters=F5,\n kernel_size=(5, 5),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_5 = layer_5(output_4)\n layer_6 = K.layers.MaxPooling2D(pool_size=(3, 3),\n strides=(1, 1),\n padding='same')\n output_6 = layer_6(A_prev)\n layer_7 = K.layers.Conv2D(filters=FPP,\n kernel_size=(1, 1),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_7 = layer_7(output_6)\n return (K.layers.concatenate([output_1, output_3, output_5, output_7]))",
"def inception_block(A_prev, filters):\n F1, F3R, F3, F5R, F5, FPP = filters\n layer_1 = K.layers.Conv2D(filters=F1,\n kernel_size=(1, 1),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_1 = layer_1(A_prev)\n layer_2 = K.layers.Conv2D(filters=F3R,\n kernel_size=(1, 1),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_2 = layer_2(A_prev)\n layer_3 = K.layers.Conv2D(filters=F3,\n kernel_size=(3, 3),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_3 = layer_3(output_2)\n layer_4 = K.layers.Conv2D(filters=F5R,\n kernel_size=(1, 1),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_4 = layer_4(A_prev)\n layer_5 = K.layers.Conv2D(filters=F5,\n kernel_size=(5, 5),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_5 = layer_5(output_4)\n layer_6 = K.layers.MaxPooling2D(pool_size=(3, 3),\n strides=(1, 1),\n padding='same')\n output_6 = layer_6(A_prev)\n layer_7 = K.layers.Conv2D(filters=FPP,\n kernel_size=(1, 1),\n padding='same',\n activation=K.activations.relu,\n kernel_initializer=K.initializers.he_normal())\n output_7 = layer_7(output_6)\n return (K.layers.concatenate([output_1, output_3, output_5, output_7]))",
"def inception_block(A_prev, filters):\n F1, F3R, F3, F5R, F5, FPP = filters\n\n layer1x1 = K.layers.Conv2D(F1, 1, activation='relu')(A_prev)\n\n layer3x3 = K.layers.Conv2D(F3R, 1, activation='relu')(A_prev)\n layer3x3 = K.layers.Conv2D(F3, 3, padding='same',\n activation='relu')(layer3x3)\n\n layer5x5 = K.layers.Conv2D(F5R, 1, activation='relu')(A_prev)\n layer5x5 = K.layers.Conv2D(F5, 5, padding='same',\n activation='relu')(layer5x5)\n\n pool = K.layers.MaxPool2D(3, 1, padding='same')(A_prev)\n pool = K.layers.Conv2D(FPP, 1, activation='relu')(pool)\n\n return K.layers.concatenate([layer1x1, layer3x3, layer5x5, pool])",
"def TCN_V5(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 32\n\n config = [ \n [(1,8,32)],\n [(1,8,32)],\n [(1,8,32)],\n [(2,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n #dr = Dropout(dropout)(conv)\n\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model",
"def _make_conv_layers_bn_padding(self): ## 20 Convs, used for pretrained by IMAGE Net 1000 class\n conv = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=3, bias=False), # padding=3 so, output is 224.\n nn.BatchNorm2d(64),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n nn.Conv2d(64, 192, 3, padding=1, bias=False),\n nn.BatchNorm2d(192),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(192, 128, 1, bias=False), ## kernel size = 1 이므로 padding = 0(defalut)\n nn.BatchNorm2d(128),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(128, 256, 3, padding=1, bias=False), \n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1, bias=False), \n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 512, 1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(1024, 512, 1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(1024, 512, 1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True)\n )\n return conv",
"def __init__(self, ):\n super().__init__()\n channels = 3\n\n # Initial convolution block\n out_features = 64\n # encoder\n self.input = nn.Sequential(\n nn.ReflectionPad2d(channels),\n nn.Conv2d(3, out_features, (7, 7)),\n nn.InstanceNorm2d(out_features),\n nn.MaxPool2d(2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(out_features, out_features * 2, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n nn.MaxPool2d(2),\n nn.Conv2d(out_features * 2, out_features * 4, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n )\n\n self.in0 = nn.InstanceNorm2d(256)\n self.block0 = blocks()\n self.block1 = blocks()\n self.block2 = blocks()\n self.block3 = blocks()\n self.block4 = blocks()\n self.block5 = blocks()\n self.block6 = blocks()\n self.block7 = blocks()\n\n self.out = nn.Sequential(\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 4, out_features * 2, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 2, out_features, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.ReflectionPad2d(channels),\n nn.Conv2d(out_features, channels, 7),\n nn.Tanh(),\n )",
"def temp_ann(S_SHSTA_0, S_SHSTA_1, S_SHSTA_2, S_SHSTA_3, I_SHSTA_0, I_SHSTA_1,\n I_SHSTA_2, I_SHSTA_3, C_KSWCK_0, C_KSWCK_1, C_KSWCK_2, C_KSWCK_3):\n # Construct input array.\n x = np.array([S_SHSTA_0, S_SHSTA_1, S_SHSTA_2, S_SHSTA_3,\n I_SHSTA_0, I_SHSTA_1, I_SHSTA_2, I_SHSTA_3,\n C_KSWCK_0, C_KSWCK_1, C_KSWCK_2, C_KSWCK_3])\n # Pass through hidden layer 1.\n W1 = np.loadtxt('W1.txt')\n B1 = np.loadtxt('B1.txt')\n h1 = relu(np.dot(W1, x) + B1)\n # Pass through hidden layer 2.\n W2 = np.loadtxt('W2.txt')\n B2 = np.loadtxt('B2.txt')\n h2 = relu(np.dot(W2, h1) + B2)\n # Pass through hidden layer 3.\n W3 = np.loadtxt('W3.txt')\n B3 = np.loadtxt('B3.txt')\n h3 = relu(np.dot(W3, h2) + B3)\n # Pass through hidden layer 4.\n W4 = np.loadtxt('W4.txt')\n B4 = np.loadtxt('B4.txt')\n h4 = relu(np.dot(W4, h3) + B4)\n # Pass through output layer.\n WO = np.loadtxt('WO.txt')\n BO = np.loadtxt('BO.txt')\n y = relu(np.dot(WO, h4) + BO)\n print(y)\n # Return result.\n return y",
"def cspdarknet53_tiny(input_data):\n input_data = common.convolutional(input_data, (3, 3, 3, 32), downsample=True)\n input_data = common.convolutional(input_data, (3, 3, 32, 64), downsample=True)\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 32, 32))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 32, 32))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 32, 64))\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 64, 128))\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 64, 128))\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 128, 256))\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 128, 128))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 128, 128))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 128, 256))\n route_1 = input_data\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 512, 512))\n\n return route_1, input_data"
] | [
"0.6417066",
"0.63162094",
"0.62862533",
"0.6282772",
"0.62645257",
"0.62633777",
"0.6236479",
"0.6060194",
"0.59885466",
"0.5915113",
"0.5875557",
"0.5869749",
"0.5859015",
"0.58565146",
"0.58363396",
"0.5828402",
"0.5815745",
"0.5803984",
"0.5789562",
"0.57847863",
"0.577449",
"0.57586426",
"0.5756944",
"0.5756944",
"0.57487994",
"0.57448375",
"0.5738793",
"0.5737326",
"0.57265884",
"0.57210284"
] | 0.66267586 | 0 |
mnasneta1 w.t. 3x3MBconv6 block only | def mnasneta1_3x3mbconv6(pretrained=False, progress=False, **kwargs):
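    # Every stage uses 3x3 MBConv blocks with expansion factor 6 and squeeze-and-excitation disabled (SE=False), per the arguments below.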
return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],
kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[False, False, False, False, False, False],
dropout=0, pretrained=pretrained, progress=progress, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mnasneta1_3x3mbconv6se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def Unet4(shape, nb_filters=32, exp=1, kernel_size=3, initialization=\"glorot_uniform\", activation=\"relu\", sigma_noise=0, output_channels=1, drop=0.0, regularization=None):\n \n \n input_layer = Input(shape=shape)\n\n conv1 = ConvBlock(input_layer, nb_filters=nb_filters, kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n if drop > 0.0: pool1 = Dropout(drop)(pool1)\n\n conv2 = ConvBlock(pool1, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n if drop > 0.0: pool2 = Dropout(drop)(pool2)\n\n conv3 = ConvBlock(pool2, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n if drop > 0.0: pool3 = Dropout(drop)(pool3)\n\n conv4 = ConvBlock(pool3, nb_filters=nb_filters * 2 **(3 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n if drop > 0.0: pool4 = Dropout(drop)(pool4)\n\n deconv5 = DeconvBlock(conv4, residual=conv3, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv5 = Dropout(drop)(deconv5)\n\n deconv6 = DeconvBlock(deconv5, residual=conv2, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv6 = Dropout(drop)(deconv6)\n\n deconv7 = DeconvBlock(deconv6, residual=conv1, nb_filters=nb_filters, kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv7 = Dropout(drop)(deconv7)\n\n if sigma_noise > 0:\n deconv7 = GaussianNoise(sigma_noise)(deconv7)\n\n output_layer = Conv2D(filters=output_channels, kernel_size=(1, 1))(deconv7)\n output_layer = BatchNormalization()(output_layer)\n output_layer = Activation('softmax')(output_layer)\n\n model = Model(inputs=input_layer, outputs=output_layer, name='Unet')\n return model",
"def mnasneta1_5x5mbconv6(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def mobilenetv2 (inputs, k, alpha = 1.0, train_bn = False):\n\n x = conv_block(inputs, 32, alpha, (3, 3), strides=(2, 2), block_id=0, train_bn=train_bn) # Input Res: 1\n\n x = inverted_residual_block(x, 16, (3, 3), t=1, strides=1, n=1, alpha=1.0, block_id=1, train_bn=train_bn)\t# Input Res: 1/2\n x = inverted_residual_block(x, 24, (3, 3), t=6, strides=2, n=2, alpha=1.0, block_id=2, train_bn=train_bn)\t# Input Res: 1/2\n x = inverted_residual_block(x, 32, (3, 3), t=6, strides=2, n=3, alpha=1.0, block_id=4, train_bn=train_bn)\t# Input Res: 1/4\n x = inverted_residual_block(x, 64, (3, 3), t=6, strides=2, n=4, alpha=1.0, block_id=7, train_bn=train_bn)\t# Input Res: 1/8\n x = inverted_residual_block(x, 96, (3, 3), t=6, strides=1, n=3, alpha=1.0, block_id=11, train_bn=train_bn)\t# Input Res: 1/8\n x = inverted_residual_block(x, 160, (3, 3), t=6, strides=2, n=3, alpha=1.0, block_id=14, train_bn=train_bn)\t# Input Res: 1/16\n x = inverted_residual_block(x, 320, (3, 3), t=6, strides=1, n=1, alpha=1.0, block_id=17, train_bn=train_bn)\t# Input Res: 1/32\n\n x = conv_block(x, 1280, alpha, (1, 1), strides=(1, 1), block_id=18, train_bn=train_bn) # Input Res: 1/32\n\n x = KL.GlobalAveragePooling2D()(x)\n x = KL.Reshape((1, 1, 1280))(x)\n x = KL.Dropout(0.3, name='Dropout')(x)\n x = KL.Conv2D(k, (1, 1), padding='same')(x)\n\n x = KL.Activation('softmax', name='softmax')(x)\n output = KL.Reshape((k,))(x)\n\n model = KM.Model(inputs, output)\n plot_model(model, to_file='MobileNetv2.png', show_shapes=True)\n\n return model",
"def inception_block_1a(X):\n\tX_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name ='inception_3a_3x3_conv1')(X)\n\tX_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name = 'inception_3a_3x3_bn1')(X_3x3)\n\tX_3x3 = Activation('relu')(X_3x3)\n\tX_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)\n\tX_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3a_3x3_conv2')(X_3x3)\n\tX_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn2')(X_3x3)\n\tX_3x3 = Activation('relu')(X_3x3)\n\tX_5x5 = Conv2D(16, (1, 1), data_format='channels_first', name='inception_3a_5x5_conv1')(X)\n\tX_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn1')(X_5x5)\n\tX_5x5 = Activation('relu')(X_5x5)\n\tX_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)\n\tX_5x5 = Conv2D(32, (5, 5), data_format='channels_first', name='inception_3a_5x5_conv2')(X_5x5)\n\tX_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn2')(X_5x5)\n\tX_5x5 = Activation('relu')(X_5x5)\n\tX_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)\n\tX_pool = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3a_pool_conv')(X_pool)\n\tX_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_pool_bn')(X_pool)\n\tX_pool = Activation('relu')(X_pool)\n\tX_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool)\n\tX_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3a_1x1_conv')(X)\n\tX_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_1x1_bn')(X_1x1)\n\tX_1x1 = Activation('relu')(X_1x1)\n\t# CONCAT\n\tinception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)\n\treturn inception",
"def inception_block_1a(X):\n\n X_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name='inception_3a_3x3_conv1')(X)\n X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn1')(X_3x3)\n X_3x3 = Activation('relu')(X_3x3)\n X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)\n X_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3a_3x3_conv2')(X_3x3)\n X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn2')(X_3x3)\n X_3x3 = Activation('relu')(X_3x3)\n\n X_5x5 = Conv2D(16, (1, 1), data_format='channels_first', name='inception_3a_5x5_conv1')(X)\n X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn1')(X_5x5)\n X_5x5 = Activation('relu')(X_5x5)\n X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)\n X_5x5 = Conv2D(32, (5, 5), data_format='channels_first', name='inception_3a_5x5_conv2')(X_5x5)\n X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn2')(X_5x5)\n X_5x5 = Activation('relu')(X_5x5)\n\n X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)\n X_pool = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3a_pool_conv')(X_pool)\n X_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_pool_bn')(X_pool)\n X_pool = Activation('relu')(X_pool)\n X_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool)\n\n X_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3a_1x1_conv')(X)\n X_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_1x1_bn')(X_1x1)\n X_1x1 = Activation('relu')(X_1x1)\n\n # CONCAT\n inception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)\n\n return inception",
"def mnasneta1_5x5mbconv3(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def _make_conv_layers_bn_padding(self): ## 20 Convs, used for pretrained by IMAGE Net 1000 class\n conv = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=3, bias=False), # padding=3 so, output is 224.\n nn.BatchNorm2d(64),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n nn.Conv2d(64, 192, 3, padding=1, bias=False),\n nn.BatchNorm2d(192),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(192, 128, 1, bias=False), ## kernel size = 1 이므로 padding = 0(defalut)\n nn.BatchNorm2d(128),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(128, 256, 3, padding=1, bias=False), \n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1, bias=False), \n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 512, 1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(1024, 512, 1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(1024, 512, 1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True)\n )\n return conv",
"def mnasneta1_5x5mbconv6se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def __init__(self, num_in, num_out, g=1, stride=1, d=(1,1),norm=None):\r\n super(MFunit, self).__init__()\r\n num_mid = num_in if num_in <= num_out else num_out\r\n self.conv1x1x1_in1 = Conv3d_Block(num_in,num_in//4,kernel_size=1,stride=1,norm=norm)\r\n self.conv1x1x1_in2 = Conv3d_Block(num_in//4,num_mid,kernel_size=1,stride=1,norm=norm)\r\n self.conv3x3x3_m1 = DilatedConv3DBlock(num_mid,num_out,kernel_size=(3,3,3),stride=stride,g=g,d=(d[0],d[0],d[0]),norm=norm) # dilated\r\n self.conv3x3x3_m2 = DilatedConv3DBlock(num_out,num_out,kernel_size=(3,3,1),stride=1,g=g,d=(d[1],d[1],1),norm=norm)\r\n # self.conv3x3x3_m2 = DilatedConv3DBlock(num_out,num_out,kernel_size=(1,3,3),stride=1,g=g,d=(1,d[1],d[1]),norm=norm)\r\n\r\n # skip connection\r\n if num_in != num_out or stride != 1:\r\n if stride == 1:\r\n self.conv1x1x1_shortcut = Conv3d_Block(num_in, num_out, kernel_size=1, stride=1, padding=0,norm=norm)\r\n if stride == 2:\r\n # if MF block with stride=2, 2x2x2\r\n self.conv2x2x2_shortcut = Conv3d_Block(num_in, num_out, kernel_size=2, stride=2,padding=0, norm=norm) # params\r",
"def mnasneta1_3x3mbconv3se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def mnasneta1_5x5mbconv3se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def fun_n_to_one_small(self, block_index, block_dim, nc1_size):\n nc1 = self.batch_size*self.c1_size\n in_size_w_num = _ceil_div(self.in_size_w, 4)\n with self.tik_instance.if_scope(block_index != block_dim - 1):\n with self.tik_instance.for_range(0, nc1_size) as nc1_index:\n ub_input = self.tik_instance.Tensor(\n \"float32\", (self.in_size_h, self.in_size_w,\n self.c_block_size),\n name=\"ub_input\", scope=tik.scope_ubuf)\n self.tik_instance.data_move(\n ub_input[0], self.grads_gm[(block_index*nc1_size +\n nc1_index) * self.in_size_h *\n self.in_size_w*16],\n 0, 1, self.in_size_h*self.in_size_w*2, 0, 0)\n if in_size_w_num > 1:\n with self.tik_instance.for_range(0, in_size_w_num) \\\n as w_in_index:\n with self.tik_instance.if_scope(\n w_in_index != in_size_w_num - 1):\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n MASK, ub_input[w_in_index*64],\n ub_input[w_in_index*64 + self.in_size_w*16],\n ub_input[w_in_index*64], self.in_size_h-1,\n 1, 1, 1, 0, self.in_size_w*2, 0)\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64], 3,\n 1, 1, 1, 0, 2, 0)\n with self.tik_instance.else_scope():\n if self.in_size_h != 1:\n self.tik_instance.vadd((self.in_size_w - (\n in_size_w_num-1)*4)*16,\n ub_input[w_in_index*64],\n ub_input[w_in_index*64 +\n self.in_size_w *\n 16],\n ub_input[w_in_index*64],\n self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w *\n 2, 0)\n if self.in_size_w-(in_size_w_num-1)*4 > 1:\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64],\n self.in_size_w-(in_size_w_num-1)*4-1,\n 1, 1, 1, 0, 2, 0)\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[64], ub_input[0],\n in_size_w_num-1, 1, 1, 1, 0, 8, 0)\n else:\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n self.in_size_w*16, ub_input[0],\n ub_input[self.in_size_w*16],\n ub_input[0], self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w*2, 0)\n if self.in_size_w != 1:\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[16],\n ub_input[0], self.in_size_w-1,\n 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size+nc1_index)*16],\n ub_input[0], 0, 1, 2, 0, 0)\n with self.tik_instance.else_scope():\n with self.tik_instance.for_range(\n 0, nc1 - (block_dim - 1)*nc1_size) as nc1_index:\n ub_input = self.tik_instance.Tensor(\n \"float32\", (self.in_size_h, self.in_size_w,\n self.c_block_size),\n name=\"ub_output\", scope=tik.scope_ubuf)\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16],\n 0, 1, self.in_size_h*self.in_size_w*2, 0, 0)\n if in_size_w_num > 1:\n with self.tik_instance.for_range(0, in_size_w_num) \\\n as w_in_index:\n with self.tik_instance.if_scope(\n w_in_index != in_size_w_num - 1):\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n MASK, ub_input[w_in_index*64],\n ub_input[w_in_index*64 + self.in_size_w*16],\n ub_input[w_in_index*64], self.in_size_h-1,\n 1, 1, 1, 0, self.in_size_w*2, 0)\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64], 3, 1, 1, 1, 0, 2, 0)\n with self.tik_instance.else_scope():\n if self.in_size_h != 1:\n self.tik_instance.vadd((self.in_size_w-(\n in_size_w_num-1)*4)*16,\n ub_input[w_in_index*64],\n ub_input[w_in_index*64 +\n self.in_size_w *\n 16],\n ub_input[w_in_index*64],\n self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w *\n 2, 0)\n if self.in_size_w-(in_size_w_num-1)*4 > 1:\n 
self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64],\n self.in_size_w-(in_size_w_num-1)*4-1,\n 1, 1, 1, 0, 2, 0)\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[64], ub_input[0],\n in_size_w_num-1, 1, 1, 1, 0, 8, 0)\n else:\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n self.in_size_w*16, ub_input[0],\n ub_input[self.in_size_w*16],\n ub_input[0], self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w*2, 0)\n if self.in_size_w != 1:\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[16], ub_input[0],\n self.in_size_w-1, 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size+nc1_index)*16],\n ub_input[0], 0, 1, 2, 0, 0)",
"def mnasneta1_3x3mbconv3(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def discriminator_block_conditionnal(self, name):\n \n if self.fit_mask : \n \n im = Input(shape=(2+self.nb_classe_mask, self.image_row, self.image_column, self.image_depth), name='dis_input')\n \n else :\n # In:\n im = Input(shape=(2, self.image_row, self.image_column, self.image_depth), name='dis_input')\n\n\n res = Input(shape=(1, self.image_row, self.image_column, self.image_depth), name='dis_input_res')\n\n inputs = Concatenate(axis=-4)([im, res])\n\n # Input 64\n disnet = Conv3D(self.discriminator_kernel * 1, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_1')(inputs)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 1 : 32\n disnet = Conv3D(self.discriminator_kernel * 2, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_2')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 2 : 16\n disnet = Conv3D(self.discriminator_kernel * 4, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_3')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 3 : 8\n disnet = Conv3D(self.discriminator_kernel * 8, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_4')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 4 : 4\n disnet = Conv3D(self.discriminator_kernel * 16, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_5')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n\n \n decision = Conv3D(1, 2, strides=1,\n use_bias=False,\n kernel_initializer='he_normal',\n data_format='channels_first',\n name='dis_decision')(disnet)\n\n decision = Reshape((1,))(decision)\n\n model = Model(inputs=[im, res], outputs=[decision], name=name)\n\n return model",
"def fun_n_to_one_big(self, block_index, block_dim, nc1_size):\n ub_output_tmp = self.tik_instance.Tensor(\n \"float32\", (4, self.c_block_size), name=\"ub_output_tmp\",\n scope=tik.scope_ubuf)\n ub_output = self.tik_instance.Tensor(\n \"float32\", (1, self.c_block_size), name=\"ub_output\",\n scope=tik.scope_ubuf)\n ub_input = self.tik_instance.Tensor(\n \"float32\", (240*4, self.c_block_size), name=\"ub_input\",\n scope=tik.scope_ubuf)\n input_num = _ceil_div(self.in_size_h*self.in_size_w*16, 240*64)\n if input_num > 1:\n thread_num = 2\n else:\n thread_num = 1\n\n nc1 = self.batch_size*self.c1_size\n with self.tik_instance.if_scope(block_index != block_dim - 1):\n with self.tik_instance.for_range(0, nc1_size) as nc1_index:\n self.tik_instance.vector_dup(MASK, ub_output_tmp, 0.0, 1, 1, 8)\n self.tik_instance.vector_dup(16, ub_output, 0.0, 1, 1, 8)\n with self.tik_instance.for_range(\n 0, input_num, thread_num=thread_num) as input_index:\n with self.tik_instance.if_scope(\n input_index != input_num - 1):\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, 8*240, 0, 0)\n self.tik_instance.vadd(MASK, ub_output_tmp[0],\n ub_input[0], ub_output_tmp[0],\n 240, 1, 1, 1, 0, 8, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, (self.in_size_h*self.in_size_w -\n input_index*4*240)*2, 0, 0)\n with self.tik_instance.for_range(\n 0, self.in_size_h*self.in_size_w -\n input_index*4*240) as tmp_index:\n self.tik_instance.vadd(16, ub_output[0],\n ub_input[tmp_index*16],\n ub_output[0], 1, 1,\n 1, 1, 0, 2, 0)\n self.tik_instance.vadd(16, ub_output[0],\n ub_output_tmp[0],\n ub_output[0],\n 4, 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size + nc1_index)*16],\n ub_output[0], 0, 1, 2, 0, 0)\n with self.tik_instance.else_scope():\n with self.tik_instance.for_range(0, nc1 - (block_dim - 1)*nc1_size)\\\n as nc1_index:\n self.tik_instance.vector_dup(MASK, ub_output_tmp, 0.0, 1, 1, 8)\n self.tik_instance.vector_dup(16, ub_output, 0.0, 1, 1, 8)\n with self.tik_instance.for_range(\n 0, input_num, thread_num=thread_num) as input_index:\n with self.tik_instance.if_scope(\n input_index != input_num - 1):\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, 8*240, 0, 0)\n self.tik_instance.vadd(MASK, ub_output_tmp[0],\n ub_input[0], ub_output_tmp[0],\n 240, 1, 1, 1, 0, 8, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, (self.in_size_h*self.in_size_w -\n input_index*4*240)*2, 0, 0)\n with self.tik_instance.for_range(\n 0, self.in_size_h*self.in_size_w -\n input_index*4*240) as tmp_index:\n self.tik_instance.vadd(16, ub_output[0],\n ub_input[tmp_index*16],\n ub_output[0], 1, 1,\n 1, 1, 0, 8, 0)\n self.tik_instance.vadd(16, ub_output[0],\n ub_output_tmp[0],\n ub_output[0],\n 4, 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size + nc1_index)*16],\n ub_output[0], 0, 1, 2, 0, 0)",
"def build_bisenet(inputs, num_classes):\n\n ### The spatial path\n ### The number of feature maps for each convolution is not specified in the paper\n ### It was chosen here to be equal to the number of feature maps of a classification\n ### model at each corresponding stage\n # spatial_net = fluid.layers.resize_bilinear(inputs, [Image_Height/8, Image_Width/8])\n # print('spatial_net_1',spatial_net)\n\n ## spatial path\n spatial_net = ConvBlock(inputs, num_filters=64, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=128, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=256, kernel_size=3, stride=2)\n # print(\"spatial_net:\", spatial_net)\n\n # spatial_net = fluid.layers.resize_bilinear(spatial_net, [Image_Height/8, Image_Width/8])\n # print('spatial_net_2',spatial_net)\n ### Context path\n model = ResNet(is_test=False)\n # spatial_net = model.bottleneck_block1(inputs)\n end_points_16, end_points_32 = model.net(inputs)\n net_4 = AttentionRefinementModule(end_points_16, num_filters=512)\n net_5 = AttentionRefinementModule(end_points_32, num_filters=1024)\n global_channels = fluid.layers.reduce_mean(net_5, [2, 3], keep_dim=True)\n net_5_scaled = fluid.layers.elementwise_mul(net_5, global_channels, axis=0)\n\n ### Combining the paths\n net_4 = Upsample(net_4, scale=2)\n net_5_scaled = Upsample(net_5_scaled, scale=4)\n # print('net_4, net_5:', [net_4, net_5_scaled])\n # layers_concat = list()\n # layers_concat.append(spatial_net)\n ## layers_concat.append(net_4)\n # layers_concat.append(net_5_scaled)\n context_net = fluid.layers.concat([spatial_net, net_4, net_5_scaled], axis=1) #\n # context_net = fluid.layers.concat(input=layers_concat,axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([net_4, net_5_scaled], axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([spatial_net,context_net], axis=1)\n # print('context_net2',context_net)\n\n ### FFM\n # net = FeatureFusionModule(input_1=spatial_net, input_2=context_net, num_filters=num_classes)\n net = FeatureFusionModule(inputs=context_net, num_filters=num_classes)\n\n # print('net', net)\n\n ## [batch_zize, num_filters, 128, 64]\n\n ### Final upscaling and finish\n # net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 128])\n # print('conv2d_transpose', net)\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn1')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[128, 256])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn2')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 512])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn3')\n #net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[512, 1024])\n #net = batch_normalization(net, relu=True, name='conv2d_transpose_bn4')\n # print('net',net)\n net = fluid.layers.image_resize(net, out_shape=[512, 1024], resample='BILINEAR')\n\n net = fluid.layers.conv2d(net, num_classes, 1)\n return net",
"def __init__(self, ):\n super().__init__()\n channels = 3\n\n # Initial convolution block\n out_features = 64\n # encoder\n self.input = nn.Sequential(\n nn.ReflectionPad2d(channels),\n nn.Conv2d(3, out_features, (7, 7)),\n nn.InstanceNorm2d(out_features),\n nn.MaxPool2d(2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(out_features, out_features * 2, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n nn.MaxPool2d(2),\n nn.Conv2d(out_features * 2, out_features * 4, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n )\n\n self.in0 = nn.InstanceNorm2d(256)\n self.block0 = blocks()\n self.block1 = blocks()\n self.block2 = blocks()\n self.block3 = blocks()\n self.block4 = blocks()\n self.block5 = blocks()\n self.block6 = blocks()\n self.block7 = blocks()\n\n self.out = nn.Sequential(\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 4, out_features * 2, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 2, out_features, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.ReflectionPad2d(channels),\n nn.Conv2d(out_features, channels, 7),\n nn.Tanh(),\n )",
"def __init__(self, block, layers, groups, reduction, dropout_p=0.2, in_channels=3,\n inplanes=128, input_3x3=True, downsample_kernel_size=3,\n downsample_padding=1, num_classes=1000):\n super(SENet, self).__init__()\n \n self.in_channels = in_channels\n self.inplanes = inplanes\n\n if input_3x3:\n layer0_modules = [\n ('conv1', nn.Conv2d(in_channels, 64, 3, stride=2, padding=1,\n bias=False)),\n ('bn1', nn.BatchNorm2d(64)),\n ('relu1', nn.ReLU(inplace=True)),\n ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1,\n bias=False)),\n ('bn2', nn.BatchNorm2d(64)),\n ('relu2', nn.ReLU(inplace=True)),\n ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1,\n bias=False)),\n ('bn3', nn.BatchNorm2d(inplanes)),\n ('relu3', nn.ReLU(inplace=True)),\n ]\n else:\n layer0_modules = [\n ('conv1', nn.Conv2d(in_channels, inplanes, kernel_size=7, stride=2,\n padding=3, bias=False)),\n ('bn1', nn.BatchNorm2d(inplanes)),\n ('relu1', nn.ReLU(inplace=True)),\n ]\n\n layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2, ceil_mode=True)))\n \n self.layer0 = nn.Sequential(OrderedDict(layer0_modules))\n \n self.layer1 = self._make_layer(\n block,\n planes=64,\n blocks=layers[0],\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=1,\n downsample_padding=0\n )\n \n self.layer2 = self._make_layer(\n block,\n planes=128,\n blocks=layers[1],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.layer3 = self._make_layer(\n block,\n planes=256,\n blocks=layers[2],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.layer4 = self._make_layer(\n block,\n planes=512,\n blocks=layers[3],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.avg_pool = nn.AvgPool2d(7, stride=1)\n self.dropout = nn.Dropout2d(dropout_p) if dropout_p is not None else None\n self.last_linear = nn.Linear(512 * block.expansion, num_classes)",
"def learn_test(data, radio=0, nmk=0, maxif=0, minif=0, num_board=1, verbose=0):\r\n train = data\r\n nmNk = nmk*1024\r\n print('nmk, nmNk', nmk, nmNk)\r\n if verbose >= 1:\r\n print (\"\\n\\n\\n\\n\\ntest_minimal_01\")\r\n\r\n network = cm1k.CM1KEmulator(network_size=num_board * 1024)\r\n assert(not network.euclidean_norm)\r\n\r\n network.write_maxif(maxif)\r\n network.write_minif(minif)\r\n read_neuron_count=[0]\r\n # Train network(RBF Learning)\r\n if radio==1:\r\n\r\n l=len(train)\r\n iteration=0\r\n ID=0\r\n for i in range (0,2):\r\n if ID != l:#&(read_neuron_count[iteration] <= nmNk) :\r\n for input in train:\r\n input_comps = [int(x) for x in input]\r\n context = input_comps[0]\r\n cat = input_comps[1]\r\n pattern = input_comps[2:]\r\n network.learn(pattern, cat, context)\r\n read_neuron_count.append(network.read_ncount())\r\n ID, UNC_c, UNC_i, UNK,total_detail, cla_result, null = classify(train, network, radio=5, spinvalue=3, num_board=1, verbose=1)\r\n iteration+=1\r\n # print(network.read_ncount())\r\n\r\n else :\r\n break\r\n #assert(network.read_ncount() == 3)\r\n # print('iteration', iteration)\r\n # print('maxif', maxif)\r\n # print('minif', minif)\r\n #print('network.register_legend[NSR] : ',network.register_legend['NSR'])\r\n # Write all sampels\r\n elif radio == 9:\r\n iteration = 0\r\n for input in train:\r\n # if verbose >= 1:\r\n # print \"================================================================================\"\r\n input_comps = [int(x) for x in input]\r\n context = input_comps[0]\r\n cat = input_comps[1]\r\n pattern = input_comps[2:]\r\n network.learn_write_all(pattern, cat, context)\r\n # print(network.read_ncount())\r\n #elif radio==2: #Deep RBF\r\n return network, iteration, read_neuron_count",
"def build_resnet152(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128) \n temp = res3a_feats\n for i in range(1, 8):\n temp = self.basic_block2(temp, 'res3b'+str(i), 'bn3b'+str(i), is_train, use_batch_norm, 128)\n res3b7_feats = temp\n \n res4a_feats = self.basic_block(res3b7_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n temp = res4a_feats\n for i in range(1, 36):\n temp = self.basic_block2(temp, 'res4b'+str(i), 'bn4b'+str(i), is_train, use_batch_norm, 256)\n res4b35_feats = temp\n\n res5a_feats = self.basic_block(res4b35_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train",
"def discriminator_block(in_filters, out_filters):\n layers = [ nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1),\n nn.LeakyReLU(0.01)]\n return layers",
"def pretrain_bm_net(self, n_epochs=25):\n optimizer = torch.optim.Adam(self.model.bm_net.parameters(), lr = self.lr*5)\n if self.bmloss_type == 'mse':\n criterion = MSELoss()\n elif self.bmloss_type == 'cos':\n criterion = CosineEmbeddingLoss()\n self.model.bm_net.train()\n self.model.bm_net.to(self.device)\n for epoch in range(n_epochs):\n self.model.bm_net.zero_grad()\n optimizer.zero_grad()\n cur_loss = []\n for batch_idx, (uids, feats, _, feats_len) in enumerate(self.model.loader):\n feats = feats.to(self.device).float()\n loss = 0\n out, out_len = self.model.bm_net(feats, feats_len)\n for idx in np.arange(len(out_len)):\n if self.bmloss_type == 'cos':\n # loss += criterion(out[idx, :out_len[idx]-1, :], feats[idx, 1:out_len[idx], :], torch.cuda.LongTensor([1]))\n loss += criterion(out[idx, :out_len[idx]-1, :], feats[idx, 1:out_len[idx], :], torch.LongTensor([1]).to(self.device))\n else:\n loss += criterion(out[idx, :out_len[idx]-1, :], feats[idx, 1:out_len[idx], :])\n # print('--------')\n # print(torch.isnan(out[idx, :out_len[idx]-1, :]).sum(), torch.isnan(feats[idx, :out_len[idx]-1, :]).sum())\n # print(torch.isnan(out).sum(), torch.isnan(feats).sum())\n # print(loss)\n loss.backward()\n cur_loss.append(loss.item())\n nn.utils.clip_grad_norm_(self.model.bm_net.parameters(), 5)\n optimizer.step()\n optimizer.zero_grad()\n self.model.bm_net.zero_grad()\n self.logger.info(f'BM Module pretrain, Epoch {epoch+1}/{n_epochs}: loss {round(np.mean(cur_loss), 8)}')",
"def __init__(self, classes=2622):\n super().__init__()\n self.conv1 = _ConvBlock(3, 64, 64)\n self.conv2 = _ConvBlock(64, 128, 128)\n self.conv3 = _ConvBlock(128, 256, 256, 256)\n self.conv4 = _ConvBlock(256, 512, 512, 512)\n self.conv5 = _ConvBlock(512, 512, 512, 512)\n self.dropout = torch.nn.Dropout(0.5)\n self.fc1 = torch.nn.Linear(7 * 7 * 512, 4096)\n self.fc2 = torch.nn.Linear(4096, 4096)\n self.fc3 = torch.nn.Linear(4096, classes)",
"def __init__(self):\n super(Decoder_1m, self).__init__()\n self.lconvtwos = nn.ModuleList(\n [\n nn.Sequential(\n nn.Dropout(p=0.1),\n nn.Conv2d(128, 32, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, 
kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n ),\n ]\n )\n\n self.convtwos = nn.ModuleList(\n [\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=1),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), 
padding=2, dilation=2),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=2, dilation=2),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=4, dilation=4),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=8, dilation=8),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=16, dilation=16),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=32, dilation=32),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n nn.Sequential(\n nn.Conv2d(64, 32, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(32),\n nn.ReLU(inplace=True),\n nn.Conv2d(32, 64, kernel_size=(3, 3), padding=64, dilation=64),\n nn.BatchNorm2d(64),\n nn.ReLU(inplace=True),\n ),\n ]\n )\n self.final = nn.Sequential(\n nn.Conv2d(64, 5, kernel_size=(1, 1), padding=0),\n nn.BatchNorm2d(5),\n nn.ReLU(inplace=True),\n nn.Conv2d(5, 1, kernel_size=(1, 1), padding=0),\n )",
"def train(trial_num, image_num, filter_num, filter_size, input_size, channel_num, pooling_rate, left_upper_padding, right_lower_padding):\n\n input_batch_num = 1\n batch_num = 2\n\n init_filters = np.array(np.random.normal(size=filter_num * channel_num *\n filter_size*filter_size), dtype=\"float32\")\n #init_filters = np.array([1.0] * filter_num * channel_num * filter_size * filter_size, dtype=\"float32\")\n init_filters = 0.01 * init_filters.reshape(filter_num, channel_num*filter_size*filter_size)\n\n init_hbias = np.array([-0.1] * filter_num, dtype=\"float32\").reshape(filter_num, 1)\n\n init_vbias = np.array([0.0] * channel_num, dtype=\"float32\").reshape(channel_num, 1)\n\n libnvcrbm = __import__(\"nvcrbm\")\n cur_filters = libnvcrbm.init(filter_num, filter_size, \n input_batch_num, input_size, channel_num,\n pooling_rate, left_upper_padding, right_lower_padding,\n init_filters, init_hbias, init_vbias)\n\n imgs = cPickle.load(open(\"../data/kyoto_large_train.pkl\", \"r\"))\n img_size = imgs[0].shape[0]\n\n for trial_idx in xrange(trial_num):\n for img_idx in xrange(image_num):\n for batch_idx in xrange(batch_num):\n row_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n col_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n #row_idx = np.arange(0, input_size) + 200\n #col_idx = np.arange(0, input_size) + 200\n\n batch_data = imgs[img_idx][row_idx][:,col_idx]\n batch_data = batch_data - batch_data.mean()\n batch_data = np.asarray(batch_data.reshape(1, input_size * input_size), dtype=\"float32\")\n \n libnvcrbm.run_batch(trial_idx, img_idx, batch_idx, batch_data)\n\n libnvcrbm.print_result()\n cur_filters = libnvcrbm.get_gpu_filters()\n dump_filter_image(cur_filters, \"../data/kyoto/filters/trial_%d.png\" % trial_idx)\n\n first_layer = {}\n first_layer[\"filters\"] = cur_filters\n first_layer[\"bias\"] = libnvcrbm.get_gpu_hbias()\n cPickle.dump(first_layer, open(\"../data/first_layer.dat\", \"w+\"))",
"def __init__(self, dropout_rate, num_classes, include_top, layer):\r\n super(VGG16_Shuffle, self).__init__()\r\n print(\"CIFAR VGG16_Shuffle is used\")\r\n self.dropout_rate = dropout_rate\r\n self.num_classes = num_classes\r\n self.include_top = include_top\r\n self.layer = layer\r\n\r\n # Define the building blocks\r\n if layer == 11:\r\n self.conv11 = CONV_3x3shuffle(3, 64, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv11 = CONV_3x3(3, 64, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 12:\r\n self.conv12 = nn.Sequential(CONV_3x3shuffle(64, 64, kernelsize=3, stride=1, padding=1),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv12 = nn.Sequential(CONV_3x3(64, 64, kernelsize=3, stride=1, padding=1, bias=False),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n # self.conv12 = CONV_3x3(64, 64, kernelsize=3, stride=2, padding=1, bias=False)\r\n\r\n if layer == 21:\r\n self.conv21 = CONV_3x3shuffle(64, 128, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv21 = CONV_3x3(64, 128, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 22:\r\n self.conv22 = nn.Sequential(CONV_3x3shuffle(128, 128, kernelsize=3, stride=1, padding=1),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv22 = nn.Sequential(CONV_3x3(128, 128, kernelsize=3, stride=1, padding=1, bias=False),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n # self.conv22 = CONV_3x3(128, 128, kernelsize=3, stride=2, padding=1, bias=False)\r\n\r\n if layer == 31:\r\n self.conv31 = CONV_3x3shuffle(128, 256, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv31 = CONV_3x3(128, 256, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 32:\r\n self.conv32 = CONV_3x3shuffle(256, 256, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv32 = CONV_3x3(256, 256, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 33:\r\n self.conv33 = nn.Sequential(CONV_3x3shuffle(256, 256, kernelsize=3, stride=1, padding=1),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv33 = nn.Sequential(CONV_3x3(256, 256, kernelsize=3, stride=1, padding=1, bias=False),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n # self.conv33 = CONV_3x3(256, 256, kernelsize=3, stride=2, padding=1, bias=False)\r\n\r\n if layer == 41:\r\n self.conv41 = CONV_3x3shuffle(256, 512, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv41 = CONV_3x3(256, 512, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 42:\r\n self.conv42 = CONV_3x3shuffle(512, 512, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv42 = CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 43:\r\n self.conv43 = nn.Sequential(CONV_3x3shuffle(512, 512, kernelsize=3, stride=1, padding=1),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv43 = nn.Sequential(CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=False),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n # self.conv43 = CONV_3x3(512, 512, kernelsize=3, stride=2, padding=1, bias=False)\r\n\r\n if layer == 51:\r\n self.conv51 = CONV_3x3shuffle(512, 512, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv51 = CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 52:\r\n self.conv52 = CONV_3x3shuffle(512, 512, kernelsize=3, stride=1, padding=1)\r\n else:\r\n self.conv52 = CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=False)\r\n\r\n if layer == 53:\r\n self.conv53 = nn.Sequential(CONV_3x3shuffle(512, 
512, kernelsize=3, stride=1, padding=1),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n else:\r\n self.conv53 = nn.Sequential(CONV_3x3(512, 512, kernelsize=3, stride=1, padding=1, bias=False),\r\n nn.MaxPool2d(kernel_size=2, stride=2))\r\n\r\n self.avgpool = nn.AdaptiveAvgPool2d(1)\r\n self.fc = nn.Sequential(nn.Linear(512, 4096),\r\n nn.ReLU(True),\r\n nn.Linear(4096, 4096),\r\n nn.ReLU(True),\r\n nn.Linear(4096, num_classes))\r\n\r\n # Initialize the weights\r\n for m in self.modules():\r\n if isinstance(m, nn.Conv2d):\r\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\r\n elif isinstance(m, nn.BatchNorm2d):\r\n # raise Exception('You are using a model without BN!!!')\r\n nn.init.constant_(m.weight, 1)\r\n nn.init.constant_(m.bias, 0)",
"def mobile_net(\n num_classes=1000,\n data_shape=(1, 3, 224, 224),\n dtype=\"float32\",\n alpha=1.0,\n is_shallow=False,\n layout=\"NCHW\",\n):\n data = relay.var(\"data\", shape=data_shape, dtype=dtype)\n body = conv_block(data, \"conv_block_1\", int(32 * alpha), strides=(2, 2), layout=layout)\n body = separable_conv_block(\n body, \"separable_conv_block_1\", int(32 * alpha), int(64 * alpha), layout=layout, dtype=dtype\n )\n body = separable_conv_block(\n body,\n \"separable_conv_block_2\",\n int(64 * alpha),\n int(128 * alpha),\n downsample=True,\n layout=layout,\n dtype=dtype,\n )\n body = separable_conv_block(\n body,\n \"separable_conv_block_3\",\n int(128 * alpha),\n int(128 * alpha),\n layout=layout,\n dtype=dtype,\n )\n body = separable_conv_block(\n body,\n \"separable_conv_block_4\",\n int(128 * alpha),\n int(256 * alpha),\n downsample=True,\n layout=layout,\n dtype=dtype,\n )\n body = separable_conv_block(\n body,\n \"separable_conv_block_5\",\n int(256 * alpha),\n int(256 * alpha),\n layout=layout,\n dtype=dtype,\n )\n body = separable_conv_block(\n body,\n \"separable_conv_block_6\",\n int(256 * alpha),\n int(512 * alpha),\n downsample=True,\n layout=layout,\n dtype=dtype,\n )\n if is_shallow:\n body = separable_conv_block(\n body,\n \"separable_conv_block_7\",\n int(512 * alpha),\n int(1024 * alpha),\n downsample=True,\n layout=layout,\n dtype=dtype,\n )\n body = separable_conv_block(\n body,\n \"separable_conv_block_8\",\n int(1024 * alpha),\n int(1024 * alpha),\n downsample=True,\n layout=layout,\n dtype=dtype,\n )\n else:\n for i in range(7, 12):\n body = separable_conv_block(\n body,\n f\"separable_conv_block_{i}\",\n int(512 * alpha),\n int(512 * alpha),\n layout=layout,\n dtype=dtype,\n )\n body = separable_conv_block(\n body,\n \"separable_conv_block_12\",\n int(512 * alpha),\n int(1024 * alpha),\n downsample=True,\n layout=layout,\n dtype=dtype,\n )\n body = separable_conv_block(\n body,\n \"separable_conv_block_13\",\n int(1024 * alpha),\n int(1024 * alpha),\n layout=layout,\n dtype=dtype,\n )\n pool = relay.nn.global_avg_pool2d(data=body, layout=layout)\n flatten = relay.nn.batch_flatten(data=pool)\n weight = relay.var(\"fc_weight\")\n bias = relay.var(\"fc_bias\")\n fc = relay.nn.dense(data=flatten, weight=weight, units=num_classes)\n fc = relay.nn.bias_add(fc, bias)\n softmax = relay.nn.softmax(data=fc)\n return relay.Function(relay.analysis.free_vars(softmax), softmax)",
"def mgcNetArchSkipMini(outLayer, l2_val, **kwargs):\n\n def_vals = {\"input_img_rows\" : 72,\n \"input_img_cols\" : 72,\n \"channels\" : 1,\n \"nb_classes\" : 13\n } # default parameters value\n\n for k, v in def_vals.items():\n kwargs.setdefault(k, v)\n\n input_img_rows = kwargs['input_img_rows']\n input_img_cols = kwargs['input_img_cols']\n channels = kwargs['channels']\n nb_classes = kwargs['nb_classes']\n\n \n # Input: 72 x 72 x 1\n img_shape = layers.Input(shape = (input_img_rows, input_img_cols, channels))\n\n # Layer 1\n #------------------------\n conv1 = layers.Conv2D(filters=8, kernel_size=(2, 2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(img_shape)\n conv1 = layers.Activation('relu')(conv1)\n conv1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)\n conv1 = layers.Dropout(0.4)(conv1)\n\n # Layer 2\n #------------------------\n conv2 = layers.Conv2D(filters=16, kernel_size=(2,2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(conv1)\n conv2 = layers.Activation('relu')(conv2) \n conv2 = layers.Conv2D(filters=64, kernel_size=(2,2), padding='same', activation='relu', strides = 2)(conv2)\n conv2 = layers.Dropout(0.4)(conv2)\n\n # Layer 3\n #------------------------\n conv3 = layers.Conv2D(filters=16, kernel_size=(2,2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(conv2)\n conv3 = layers.Activation('relu')(conv3) \n conv3 = layers.Conv2D(filters=16, kernel_size=(2,2), padding='same', activation='relu', strides = 2)(conv3)\n conv3 = layers.Dropout(0.4)(conv3)\n \n # skip connect 1\n #shortcut_layer = layers.Conv2D(filters=64, kernel_size=(1,1), padding='same', activation='relu', strides = 4)(conv1)\n shortcut_layer = layers.Conv2D(filters=16, kernel_size=(1,1), padding='same', activation='relu', strides = 8)(img_shape)\n \n conv3 = layers.add([shortcut_layer, conv3])\n #conv3 = layers.Concatenate()([shortcut_layer,conv3]) \n\n # Layer 4\n #------------------------\n conv4 = layers.Conv2D(filters=32, kernel_size=(2,2), padding='same', dilation_rate = (2, 2), kernel_regularizer=regularizers.l2(l2_val))(conv3)\n conv4 = layers.Activation('relu')(conv4)\n conv4 = layers.Conv2D(filters=16, kernel_size=(2,2), padding='same', activation='relu', strides = 2)(conv4)\n conv4 = layers.Dropout(0.4)(conv4)\n\n # Layer 5\n #------------------------\n output = layers.Conv2D(filters=32, kernel_size=(2,2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(conv3) # skip layer 4\n output = layers.Activation('relu')(output)\n output = layers.Conv2D(filters=32, kernel_size=(2,2), padding='same', activation='relu', strides = 2)(output) \n output = layers.Dropout(0.4)(output)\n \n # skip connect 2\n shortcut_layer2 = layers.Conv2D(filters=32, kernel_size=(1,1), padding='same', activation='relu', strides = 2)(conv3)\n output = layers.add([shortcut_layer2, output])\n \n # FC Layer\n #------------------------\n outputmlp = layers.Flatten()(output)\n outputmlp = layers.Dense(32, activation = 'relu')(outputmlp)\n outputmlp = layers.Dropout(0.5)(outputmlp)\n\n predictionsMlp = layers.Dense(nb_classes, activation='softmax')(outputmlp)\n \n \n # global averaging\n weight_decay=1E-4\n concat_axis = 1\n \n x = BatchNormalization(axis=concat_axis,\n gamma_regularizer=regularizers.l2(weight_decay),\n beta_regularizer=regularizers.l2(weight_decay))(output)\n x = Activation('relu')(x)\n x = layers.Dropout(0.4)(x)\n x = GlobalAveragePooling2D(data_format=K.image_data_format())(x)\n \n predictionsGloAvg = layers.Dense(nb_classes,\n activation='softmax',\n 
kernel_regularizer=regularizers.l2(weight_decay),\n bias_regularizer=regularizers.l2(weight_decay))(x)\n \n if outLayer == \"gloAvg\":\n predictions = predictionsGloAvg\n elif outLayer == \"mlp\":\n predictions = predictionsMlp\n \n # prediction model\n model = Model(img_shape, predictions, name = 'skipconnect')\n\n return model",
"def block_inception_c(blk, net):\n # By default use stride=1 and SAME padding\n s = net.add(Split('%s/Split' % blk, 4))\n br0 = conv2d(net, '%s/Branch_0/Conv2d_0a_1x1' % blk, 256, 1, src=s)\n\n br1 = conv2d(net, '%s/Branch_1/Conv2d_0a_1x1' % blk, 384, 1, src=s)\n br1 = net.add(Split('%s/Branch_1/Split' % blk, 2))\n br10 = conv2d(net, '%s/Branch_1/Conv2d_0b_1x3' % blk, 256, (1, 3), src=br1)\n br11 = conv2d(net, '%s/Branch_1/Conv2d_0c_3x1' % blk, 256, (3, 1), src=br1)\n br1 = net.add(Concat('%s/Branch_1/Concat' % blk, 1), [br10, br11])\n\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0a_1x1' % blk, 384, 1, src=s)\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0b_3x1' % blk, 448, (3, 1))\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0c_1x3' % blk, 512, (1, 3))\n br2 = net.add(Split('%s/Branch_2/Split' % blk, 2))\n br20 = conv2d(net, '%s/Branch_2/Conv2d_0d_1x3' % blk, 256, (1, 3), src=br2)\n br21 = conv2d(net, '%s/Branch_2/Conv2d_0e_3x1' % blk, 256, (3, 1), src=br2)\n br2 = net.add(Concat('%s/Branch_2/Concat' % blk, 1), [br20, br21])\n\n br3 = net.add(AvgPooling2D('%s/Branch_3/AvgPool_0a_3x3' % blk, 3, 1), s)\n br3 = conv2d(net, '%s/Branch_3/Conv2d_0b_1x1' % blk, 256, 1)\n return net.add(Concat('%s/Concat' % blk, 1), [br0, br1, br2, br3])"
] | [
"0.6497341",
"0.62858516",
"0.6241714",
"0.62130415",
"0.62099636",
"0.61729336",
"0.6155735",
"0.6155116",
"0.61449134",
"0.61261827",
"0.60924345",
"0.60896015",
"0.60837203",
"0.60367846",
"0.60332847",
"0.59898615",
"0.59516066",
"0.59444976",
"0.58755845",
"0.5873228",
"0.5862972",
"0.58531487",
"0.58524007",
"0.58358955",
"0.58337003",
"0.582745",
"0.58264685",
"0.5825826",
"0.58185107",
"0.5814279"
] | 0.6507609 | 0 |
mnasneta1 with 3x3MBconv6SE block only | def mnasneta1_3x3mbconv6se(pretrained=False, progress=False, **kwargs):
return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],
kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[True, True, True, True, True, True],
dropout=0, pretrained=pretrained, progress=progress, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Unet4(shape, nb_filters=32, exp=1, kernel_size=3, initialization=\"glorot_uniform\", activation=\"relu\", sigma_noise=0, output_channels=1, drop=0.0, regularization=None):\n \n \n input_layer = Input(shape=shape)\n\n conv1 = ConvBlock(input_layer, nb_filters=nb_filters, kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n if drop > 0.0: pool1 = Dropout(drop)(pool1)\n\n conv2 = ConvBlock(pool1, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n if drop > 0.0: pool2 = Dropout(drop)(pool2)\n\n conv3 = ConvBlock(pool2, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n if drop > 0.0: pool3 = Dropout(drop)(pool3)\n\n conv4 = ConvBlock(pool3, nb_filters=nb_filters * 2 **(3 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n if drop > 0.0: pool4 = Dropout(drop)(pool4)\n\n deconv5 = DeconvBlock(conv4, residual=conv3, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv5 = Dropout(drop)(deconv5)\n\n deconv6 = DeconvBlock(deconv5, residual=conv2, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv6 = Dropout(drop)(deconv6)\n\n deconv7 = DeconvBlock(deconv6, residual=conv1, nb_filters=nb_filters, kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv7 = Dropout(drop)(deconv7)\n\n if sigma_noise > 0:\n deconv7 = GaussianNoise(sigma_noise)(deconv7)\n\n output_layer = Conv2D(filters=output_channels, kernel_size=(1, 1))(deconv7)\n output_layer = BatchNormalization()(output_layer)\n output_layer = Activation('softmax')(output_layer)\n\n model = Model(inputs=input_layer, outputs=output_layer, name='Unet')\n return model",
"def inception_block_1a(X):\n\tX_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name ='inception_3a_3x3_conv1')(X)\n\tX_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name = 'inception_3a_3x3_bn1')(X_3x3)\n\tX_3x3 = Activation('relu')(X_3x3)\n\tX_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)\n\tX_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3a_3x3_conv2')(X_3x3)\n\tX_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn2')(X_3x3)\n\tX_3x3 = Activation('relu')(X_3x3)\n\tX_5x5 = Conv2D(16, (1, 1), data_format='channels_first', name='inception_3a_5x5_conv1')(X)\n\tX_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn1')(X_5x5)\n\tX_5x5 = Activation('relu')(X_5x5)\n\tX_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)\n\tX_5x5 = Conv2D(32, (5, 5), data_format='channels_first', name='inception_3a_5x5_conv2')(X_5x5)\n\tX_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn2')(X_5x5)\n\tX_5x5 = Activation('relu')(X_5x5)\n\tX_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)\n\tX_pool = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3a_pool_conv')(X_pool)\n\tX_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_pool_bn')(X_pool)\n\tX_pool = Activation('relu')(X_pool)\n\tX_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool)\n\tX_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3a_1x1_conv')(X)\n\tX_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_1x1_bn')(X_1x1)\n\tX_1x1 = Activation('relu')(X_1x1)\n\t# CONCAT\n\tinception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)\n\treturn inception",
"def inception_block_1a(X):\n\n X_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name='inception_3a_3x3_conv1')(X)\n X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn1')(X_3x3)\n X_3x3 = Activation('relu')(X_3x3)\n X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)\n X_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3a_3x3_conv2')(X_3x3)\n X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn2')(X_3x3)\n X_3x3 = Activation('relu')(X_3x3)\n\n X_5x5 = Conv2D(16, (1, 1), data_format='channels_first', name='inception_3a_5x5_conv1')(X)\n X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn1')(X_5x5)\n X_5x5 = Activation('relu')(X_5x5)\n X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)\n X_5x5 = Conv2D(32, (5, 5), data_format='channels_first', name='inception_3a_5x5_conv2')(X_5x5)\n X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn2')(X_5x5)\n X_5x5 = Activation('relu')(X_5x5)\n\n X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)\n X_pool = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3a_pool_conv')(X_pool)\n X_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_pool_bn')(X_pool)\n X_pool = Activation('relu')(X_pool)\n X_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool)\n\n X_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3a_1x1_conv')(X)\n X_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_1x1_bn')(X_1x1)\n X_1x1 = Activation('relu')(X_1x1)\n\n # CONCAT\n inception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)\n\n return inception",
"def mobilenetv2 (inputs, k, alpha = 1.0, train_bn = False):\n\n x = conv_block(inputs, 32, alpha, (3, 3), strides=(2, 2), block_id=0, train_bn=train_bn) # Input Res: 1\n\n x = inverted_residual_block(x, 16, (3, 3), t=1, strides=1, n=1, alpha=1.0, block_id=1, train_bn=train_bn)\t# Input Res: 1/2\n x = inverted_residual_block(x, 24, (3, 3), t=6, strides=2, n=2, alpha=1.0, block_id=2, train_bn=train_bn)\t# Input Res: 1/2\n x = inverted_residual_block(x, 32, (3, 3), t=6, strides=2, n=3, alpha=1.0, block_id=4, train_bn=train_bn)\t# Input Res: 1/4\n x = inverted_residual_block(x, 64, (3, 3), t=6, strides=2, n=4, alpha=1.0, block_id=7, train_bn=train_bn)\t# Input Res: 1/8\n x = inverted_residual_block(x, 96, (3, 3), t=6, strides=1, n=3, alpha=1.0, block_id=11, train_bn=train_bn)\t# Input Res: 1/8\n x = inverted_residual_block(x, 160, (3, 3), t=6, strides=2, n=3, alpha=1.0, block_id=14, train_bn=train_bn)\t# Input Res: 1/16\n x = inverted_residual_block(x, 320, (3, 3), t=6, strides=1, n=1, alpha=1.0, block_id=17, train_bn=train_bn)\t# Input Res: 1/32\n\n x = conv_block(x, 1280, alpha, (1, 1), strides=(1, 1), block_id=18, train_bn=train_bn) # Input Res: 1/32\n\n x = KL.GlobalAveragePooling2D()(x)\n x = KL.Reshape((1, 1, 1280))(x)\n x = KL.Dropout(0.3, name='Dropout')(x)\n x = KL.Conv2D(k, (1, 1), padding='same')(x)\n\n x = KL.Activation('softmax', name='softmax')(x)\n output = KL.Reshape((k,))(x)\n\n model = KM.Model(inputs, output)\n plot_model(model, to_file='MobileNetv2.png', show_shapes=True)\n\n return model",
"def mnasneta1_3x3mbconv6(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def mnasneta1_5x5mbconv3se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def mnasneta1_5x5mbconv6se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def mnasneta1_3x3mbconv3se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def discriminator_block_conditionnal(self, name):\n \n if self.fit_mask : \n \n im = Input(shape=(2+self.nb_classe_mask, self.image_row, self.image_column, self.image_depth), name='dis_input')\n \n else :\n # In:\n im = Input(shape=(2, self.image_row, self.image_column, self.image_depth), name='dis_input')\n\n\n res = Input(shape=(1, self.image_row, self.image_column, self.image_depth), name='dis_input_res')\n\n inputs = Concatenate(axis=-4)([im, res])\n\n # Input 64\n disnet = Conv3D(self.discriminator_kernel * 1, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_1')(inputs)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 1 : 32\n disnet = Conv3D(self.discriminator_kernel * 2, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_2')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 2 : 16\n disnet = Conv3D(self.discriminator_kernel * 4, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_3')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 3 : 8\n disnet = Conv3D(self.discriminator_kernel * 8, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_4')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 4 : 4\n disnet = Conv3D(self.discriminator_kernel * 16, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_5')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n\n \n decision = Conv3D(1, 2, strides=1,\n use_bias=False,\n kernel_initializer='he_normal',\n data_format='channels_first',\n name='dis_decision')(disnet)\n\n decision = Reshape((1,))(decision)\n\n model = Model(inputs=[im, res], outputs=[decision], name=name)\n\n return model",
"def __init__(self, block, layers, groups, reduction, dropout_p=0.2, in_channels=3,\n inplanes=128, input_3x3=True, downsample_kernel_size=3,\n downsample_padding=1, num_classes=1000):\n super(SENet, self).__init__()\n \n self.in_channels = in_channels\n self.inplanes = inplanes\n\n if input_3x3:\n layer0_modules = [\n ('conv1', nn.Conv2d(in_channels, 64, 3, stride=2, padding=1,\n bias=False)),\n ('bn1', nn.BatchNorm2d(64)),\n ('relu1', nn.ReLU(inplace=True)),\n ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1,\n bias=False)),\n ('bn2', nn.BatchNorm2d(64)),\n ('relu2', nn.ReLU(inplace=True)),\n ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1,\n bias=False)),\n ('bn3', nn.BatchNorm2d(inplanes)),\n ('relu3', nn.ReLU(inplace=True)),\n ]\n else:\n layer0_modules = [\n ('conv1', nn.Conv2d(in_channels, inplanes, kernel_size=7, stride=2,\n padding=3, bias=False)),\n ('bn1', nn.BatchNorm2d(inplanes)),\n ('relu1', nn.ReLU(inplace=True)),\n ]\n\n layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2, ceil_mode=True)))\n \n self.layer0 = nn.Sequential(OrderedDict(layer0_modules))\n \n self.layer1 = self._make_layer(\n block,\n planes=64,\n blocks=layers[0],\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=1,\n downsample_padding=0\n )\n \n self.layer2 = self._make_layer(\n block,\n planes=128,\n blocks=layers[1],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.layer3 = self._make_layer(\n block,\n planes=256,\n blocks=layers[2],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.layer4 = self._make_layer(\n block,\n planes=512,\n blocks=layers[3],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.avg_pool = nn.AvgPool2d(7, stride=1)\n self.dropout = nn.Dropout2d(dropout_p) if dropout_p is not None else None\n self.last_linear = nn.Linear(512 * block.expansion, num_classes)",
"def __init__(self, num_in, num_out, g=1, stride=1, d=(1,1),norm=None):\r\n super(MFunit, self).__init__()\r\n num_mid = num_in if num_in <= num_out else num_out\r\n self.conv1x1x1_in1 = Conv3d_Block(num_in,num_in//4,kernel_size=1,stride=1,norm=norm)\r\n self.conv1x1x1_in2 = Conv3d_Block(num_in//4,num_mid,kernel_size=1,stride=1,norm=norm)\r\n self.conv3x3x3_m1 = DilatedConv3DBlock(num_mid,num_out,kernel_size=(3,3,3),stride=stride,g=g,d=(d[0],d[0],d[0]),norm=norm) # dilated\r\n self.conv3x3x3_m2 = DilatedConv3DBlock(num_out,num_out,kernel_size=(3,3,1),stride=1,g=g,d=(d[1],d[1],1),norm=norm)\r\n # self.conv3x3x3_m2 = DilatedConv3DBlock(num_out,num_out,kernel_size=(1,3,3),stride=1,g=g,d=(1,d[1],d[1]),norm=norm)\r\n\r\n # skip connection\r\n if num_in != num_out or stride != 1:\r\n if stride == 1:\r\n self.conv1x1x1_shortcut = Conv3d_Block(num_in, num_out, kernel_size=1, stride=1, padding=0,norm=norm)\r\n if stride == 2:\r\n # if MF block with stride=2, 2x2x2\r\n self.conv2x2x2_shortcut = Conv3d_Block(num_in, num_out, kernel_size=2, stride=2,padding=0, norm=norm) # params\r",
"def mnasneta1_5x5mbconv6(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def test_se_block(self):\n images = tf.zeros((10, 128, 128, 3), dtype=tf.float32)\n global_params = efficientnet_model.GlobalParams(\n 1.0,\n 1.0,\n 0,\n 'channels_last',\n num_classes=10,\n batch_norm=utils.TpuBatchNormalization)\n blocks_args = [\n efficientnet_model.BlockArgs(\n kernel_size=3,\n num_repeat=3,\n input_filters=3,\n output_filters=6,\n expand_ratio=6,\n id_skip=False,\n strides=[2, 2],\n se_ratio=0.8,\n conv_type=0,\n fused_conv=0,\n super_pixel=0)\n ]\n model = efficientnet_model.Model(blocks_args, global_params)\n outputs = model(images, training=True)\n self.assertEqual((10, 10), outputs[0].shape)",
"def fun_n_to_one_small(self, block_index, block_dim, nc1_size):\n nc1 = self.batch_size*self.c1_size\n in_size_w_num = _ceil_div(self.in_size_w, 4)\n with self.tik_instance.if_scope(block_index != block_dim - 1):\n with self.tik_instance.for_range(0, nc1_size) as nc1_index:\n ub_input = self.tik_instance.Tensor(\n \"float32\", (self.in_size_h, self.in_size_w,\n self.c_block_size),\n name=\"ub_input\", scope=tik.scope_ubuf)\n self.tik_instance.data_move(\n ub_input[0], self.grads_gm[(block_index*nc1_size +\n nc1_index) * self.in_size_h *\n self.in_size_w*16],\n 0, 1, self.in_size_h*self.in_size_w*2, 0, 0)\n if in_size_w_num > 1:\n with self.tik_instance.for_range(0, in_size_w_num) \\\n as w_in_index:\n with self.tik_instance.if_scope(\n w_in_index != in_size_w_num - 1):\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n MASK, ub_input[w_in_index*64],\n ub_input[w_in_index*64 + self.in_size_w*16],\n ub_input[w_in_index*64], self.in_size_h-1,\n 1, 1, 1, 0, self.in_size_w*2, 0)\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64], 3,\n 1, 1, 1, 0, 2, 0)\n with self.tik_instance.else_scope():\n if self.in_size_h != 1:\n self.tik_instance.vadd((self.in_size_w - (\n in_size_w_num-1)*4)*16,\n ub_input[w_in_index*64],\n ub_input[w_in_index*64 +\n self.in_size_w *\n 16],\n ub_input[w_in_index*64],\n self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w *\n 2, 0)\n if self.in_size_w-(in_size_w_num-1)*4 > 1:\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64],\n self.in_size_w-(in_size_w_num-1)*4-1,\n 1, 1, 1, 0, 2, 0)\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[64], ub_input[0],\n in_size_w_num-1, 1, 1, 1, 0, 8, 0)\n else:\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n self.in_size_w*16, ub_input[0],\n ub_input[self.in_size_w*16],\n ub_input[0], self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w*2, 0)\n if self.in_size_w != 1:\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[16],\n ub_input[0], self.in_size_w-1,\n 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size+nc1_index)*16],\n ub_input[0], 0, 1, 2, 0, 0)\n with self.tik_instance.else_scope():\n with self.tik_instance.for_range(\n 0, nc1 - (block_dim - 1)*nc1_size) as nc1_index:\n ub_input = self.tik_instance.Tensor(\n \"float32\", (self.in_size_h, self.in_size_w,\n self.c_block_size),\n name=\"ub_output\", scope=tik.scope_ubuf)\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16],\n 0, 1, self.in_size_h*self.in_size_w*2, 0, 0)\n if in_size_w_num > 1:\n with self.tik_instance.for_range(0, in_size_w_num) \\\n as w_in_index:\n with self.tik_instance.if_scope(\n w_in_index != in_size_w_num - 1):\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n MASK, ub_input[w_in_index*64],\n ub_input[w_in_index*64 + self.in_size_w*16],\n ub_input[w_in_index*64], self.in_size_h-1,\n 1, 1, 1, 0, self.in_size_w*2, 0)\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64], 3, 1, 1, 1, 0, 2, 0)\n with self.tik_instance.else_scope():\n if self.in_size_h != 1:\n self.tik_instance.vadd((self.in_size_w-(\n in_size_w_num-1)*4)*16,\n ub_input[w_in_index*64],\n ub_input[w_in_index*64 +\n self.in_size_w *\n 16],\n ub_input[w_in_index*64],\n self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w *\n 2, 0)\n if self.in_size_w-(in_size_w_num-1)*4 > 1:\n 
self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64],\n self.in_size_w-(in_size_w_num-1)*4-1,\n 1, 1, 1, 0, 2, 0)\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[64], ub_input[0],\n in_size_w_num-1, 1, 1, 1, 0, 8, 0)\n else:\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n self.in_size_w*16, ub_input[0],\n ub_input[self.in_size_w*16],\n ub_input[0], self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w*2, 0)\n if self.in_size_w != 1:\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[16], ub_input[0],\n self.in_size_w-1, 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size+nc1_index)*16],\n ub_input[0], 0, 1, 2, 0, 0)",
"def block_inception_c(blk, net):\n # By default use stride=1 and SAME padding\n s = net.add(Split('%s/Split' % blk, 4))\n br0 = conv2d(net, '%s/Branch_0/Conv2d_0a_1x1' % blk, 256, 1, src=s)\n\n br1 = conv2d(net, '%s/Branch_1/Conv2d_0a_1x1' % blk, 384, 1, src=s)\n br1 = net.add(Split('%s/Branch_1/Split' % blk, 2))\n br10 = conv2d(net, '%s/Branch_1/Conv2d_0b_1x3' % blk, 256, (1, 3), src=br1)\n br11 = conv2d(net, '%s/Branch_1/Conv2d_0c_3x1' % blk, 256, (3, 1), src=br1)\n br1 = net.add(Concat('%s/Branch_1/Concat' % blk, 1), [br10, br11])\n\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0a_1x1' % blk, 384, 1, src=s)\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0b_3x1' % blk, 448, (3, 1))\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0c_1x3' % blk, 512, (1, 3))\n br2 = net.add(Split('%s/Branch_2/Split' % blk, 2))\n br20 = conv2d(net, '%s/Branch_2/Conv2d_0d_1x3' % blk, 256, (1, 3), src=br2)\n br21 = conv2d(net, '%s/Branch_2/Conv2d_0e_3x1' % blk, 256, (3, 1), src=br2)\n br2 = net.add(Concat('%s/Branch_2/Concat' % blk, 1), [br20, br21])\n\n br3 = net.add(AvgPooling2D('%s/Branch_3/AvgPool_0a_3x3' % blk, 3, 1), s)\n br3 = conv2d(net, '%s/Branch_3/Conv2d_0b_1x1' % blk, 256, 1)\n return net.add(Concat('%s/Concat' % blk, 1), [br0, br1, br2, br3])",
"def mnasneta1_5x5mbconv3(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def build_bisenet(inputs, num_classes):\n\n ### The spatial path\n ### The number of feature maps for each convolution is not specified in the paper\n ### It was chosen here to be equal to the number of feature maps of a classification\n ### model at each corresponding stage\n # spatial_net = fluid.layers.resize_bilinear(inputs, [Image_Height/8, Image_Width/8])\n # print('spatial_net_1',spatial_net)\n\n ## spatial path\n spatial_net = ConvBlock(inputs, num_filters=64, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=128, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=256, kernel_size=3, stride=2)\n # print(\"spatial_net:\", spatial_net)\n\n # spatial_net = fluid.layers.resize_bilinear(spatial_net, [Image_Height/8, Image_Width/8])\n # print('spatial_net_2',spatial_net)\n ### Context path\n model = ResNet(is_test=False)\n # spatial_net = model.bottleneck_block1(inputs)\n end_points_16, end_points_32 = model.net(inputs)\n net_4 = AttentionRefinementModule(end_points_16, num_filters=512)\n net_5 = AttentionRefinementModule(end_points_32, num_filters=1024)\n global_channels = fluid.layers.reduce_mean(net_5, [2, 3], keep_dim=True)\n net_5_scaled = fluid.layers.elementwise_mul(net_5, global_channels, axis=0)\n\n ### Combining the paths\n net_4 = Upsample(net_4, scale=2)\n net_5_scaled = Upsample(net_5_scaled, scale=4)\n # print('net_4, net_5:', [net_4, net_5_scaled])\n # layers_concat = list()\n # layers_concat.append(spatial_net)\n ## layers_concat.append(net_4)\n # layers_concat.append(net_5_scaled)\n context_net = fluid.layers.concat([spatial_net, net_4, net_5_scaled], axis=1) #\n # context_net = fluid.layers.concat(input=layers_concat,axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([net_4, net_5_scaled], axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([spatial_net,context_net], axis=1)\n # print('context_net2',context_net)\n\n ### FFM\n # net = FeatureFusionModule(input_1=spatial_net, input_2=context_net, num_filters=num_classes)\n net = FeatureFusionModule(inputs=context_net, num_filters=num_classes)\n\n # print('net', net)\n\n ## [batch_zize, num_filters, 128, 64]\n\n ### Final upscaling and finish\n # net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 128])\n # print('conv2d_transpose', net)\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn1')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[128, 256])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn2')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 512])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn3')\n #net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[512, 1024])\n #net = batch_normalization(net, relu=True, name='conv2d_transpose_bn4')\n # print('net',net)\n net = fluid.layers.image_resize(net, out_shape=[512, 1024], resample='BILINEAR')\n\n net = fluid.layers.conv2d(net, num_classes, 1)\n return net",
"def init_stitch(N):\n\tif N is None:\n\t\tN = np.int32(HII_DIM) #prepare for stitching\n\tMETA_GRID_SIZE = DIM/N\n\tM = np.int32(HII_DIM/META_GRID_SIZE)\n\t#HII_DIM = np.int32(HII_DIM)\n\tf_pixel_factor = DIM/HII_DIM;\n\tscale = np.float32(BOX_LEN/DIM)\n\tprint 'scale', scale\n\tHII_scale = np.float32(BOX_LEN/HII_DIM)\n\tshape = (DIM,DIM,N)\n\tstitch_grid_size = (DIM/(block_size[0]),\n\t\t\t\t\t\tDIM/(block_size[0]),\n\t\t\t\t\t\tN/(block_size[0]))\n\tHII_stitch_grid_size = (HII_DIM/(block_size[0]),\n\t\t\t\t\t\tHII_DIM/(block_size[0]),\n\t\t\t\t\t\tM/(block_size[0]))\n\t#ratio of large box to small size\n\tkernel_source = open(cmd_folder+\"/initialize_stitch.cu\").read()\n\tkernel_code = kernel_source % {\n\n\t\t'DELTAK': DELTA_K,\n\t\t'DIM': DIM, \n\t\t'VOLUME': VOLUME,\n\t\t'META_BLOCKDIM': N\n\t}\n\tmain_module = nvcc.SourceModule(kernel_code)\n\tinit_stitch = main_module.get_function(\"init_kernel\")\n\tHII_filter = main_module.get_function(\"HII_filter\")\n\tsubsample_kernel = main_module.get_function(\"subsample\")\n\tvelocity_kernel = main_module.get_function(\"set_velocity\")\n\tpspec_texture = main_module.get_texref(\"pspec\")\n\tMRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=0)\n\tplan2d = Plan((np.int64(DIM), np.int64(DIM)), dtype=np.complex64)\n\tplan1d = Plan((np.int64(DIM)), dtype=np.complex64)\n\tprint \"init pspec\"\n\tinterpPspec, interpSize = init_pspec() #interpPspec contains both k array and P array\n\tinterp_cu = cuda.matrix_to_array(interpPspec, order='F')\n\tcuda.bind_array_to_texref(interp_cu, pspec_texture)\n\t#hbox_large = pyfftw.empty_aligned((DIM, DIM, DIM), dtype='complex64')\n\thbox_large = np.zeros((DIM, DIM, DIM), dtype=np.complex64)\n\t#hbox_small = np.zeros(HII_shape, dtype=np.float32)\n\t#hbox_large = n\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\n\t# Set up pinned memory for transfer\n\t#largebox_hs = cuda.aligned_empty(shape=shape, dtype=np.float32, alignment=resource.getpagesize())\n\tlargebox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.float32)\n\tlargecbox_pin = cuda.pagelocked_empty(shape=shape, dtype=np.complex64)\n\n\tlargebox_d = gpuarray.zeros(shape, dtype=np.float32)\n\tlargebox_d_imag = gpuarray.zeros(shape, dtype=np.float32)\n\tprint \"init boxes\"\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t# MRGgen = MRG32k3aRandomNumberGenerator(seed_getter=seed_getter_uniform, offset=meta_x*N**3)\n\t\tinit_stitch(largebox_d, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tinit_stitch(largebox_d_imag, DIM, np.int32(meta_z),block=block_size, grid=stitch_grid_size)\n\t\tlargebox_d *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d_imag *= MRGgen.gen_normal(shape, dtype=np.float32)\n\t\tlargebox_d = largebox_d + np.complex64(1.j) * largebox_d_imag\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largecbox_pin.copy()\n\t#if want to get velocity need to use this\n\tif True:\n\t\tprint \"saving kbox\"\n\t\tnp.save(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\n\tprint \"Executing FFT on device\"\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint hbox_large.dtype\n\tprint \"Finished FFT on device\"\n\tnp.save(parent_folder+\"/Boxes/deltax_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN), hbox_large)\n\t\n\tif True:\n\t\tprint \"loading kbox\"\n\t\thbox_large = 
np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\t#cuda.memcpy_htod_async(largebox_d, largebox_pin)\n\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tHII_filter(largebox_d, DIM, np.int32(meta_z), ZERO, smoothR, block=block_size, grid=stitch_grid_size);\n\t\thbox_large[:, :, meta_z*N:(meta_z+1)*N] = largebox_d.get_async()\n\t#import IPython; IPython.embed()\n\tprint \"Executing FFT on host\"\n\t#hbox_large = hifft(hbox_large).astype(np.complex64).real\n\t#hbox_large = pyfftw.interfaces.numpy_fft.ifftn(hbox_large).real\n\thbox_large = fft_stitch(N, plan2d, plan1d, hbox_large, largebox_d).real\n\tprint \"Finished FFT on host\"\n\t#import IPython; IPython.embed()\n\n\t# for meta_x in xrange(META_GRID_SIZE):\n\t# \tfor meta_y in xrange(META_GRID_SIZE):\n\t# \t\tfor meta_z in xrange(META_GRID_SIZE):\n\t# \t\t\tlargebox_d = gpuarray.to_gpu(hbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N])\n\t# \t\t\tHII_filter(largebox_d, N, np.int32(meta_x), np.int32(meta_y), np.int32(meta_z), ZERO, smoothR, block=block_size, grid=grid_size);\n\t# \t\t\thbox_large[meta_x*N:(meta_x+1)*N, meta_y*N:(meta_y+1)*N, meta_z*N:(meta_z+1)*N] = largebox_d.get()\n\t#plan = Plan(shape, dtype=np.complex64)\n\t#plan.execute(largebox_d, inverse=True) #FFT to real space of smoothed box\n\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\n\n\t# This saves a large resolution deltax\n\n\t\n\tprint \"downsampling\"\n\tsmallbox_d = gpuarray.zeros((HII_DIM,HII_DIM,M), dtype=np.float32)\n\tfor meta_z in xrange(META_GRID_SIZE):\n\t\tlargebox_pin = hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy()\n\t\tcuda.memcpy_dtoh_async(largecbox_pin, largebox_d)\n\t\t#largebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\tlargebox_d /= scale**3 #\n\t\tsubsample_kernel(largebox_d, smallbox_d, DIM, HII_DIM, PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size) #subsample in real space\n\t\thbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallbox_d.get_async()\n\tnp.save(parent_folder+\"/Boxes/smoothed_deltax_z0.00_{0:d}_{1:.0f}Mpc\".format(HII_DIM, BOX_LEN), hbox_small)\n\t#import IPython; IPython.embed()\n\n\n\t# To get velocities: reload the k-space box\n\thbox_large = np.load(parent_folder+\"/Boxes/deltak_z0.00_{0:d}_{1:.0f}Mpc.npy\".format(DIM, BOX_LEN))\n\thvbox_large = np.zeros((DIM, DIM, DIM), dtype=np.float32)\n\thvbox_small = np.zeros(HII_shape, dtype=np.float32)\n\tsmoothR = np.float32(L_FACTOR*BOX_LEN/HII_DIM)\n\tlargevbox_d = gpuarray.zeros((DIM,DIM,N), dtype=np.complex64)\n\tsmallvbox_d = gpuarray.zeros((HII_DIM, HII_DIM, M), dtype=np.float32)\n\tfor num, mode in enumerate(['x', 'y', 'z']):\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargebox_d = gpuarray.to_gpu_async(hbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\t#largebox_d /= VOLUME #divide by VOLUME if using fft (vs ifft)\n\t\t\tvelocity_kernel(largebox_d, largevbox_d, DIM, np.int32(meta_z), np.int32(num), block=block_size, grid=stitch_grid_size)\n\t\t\tHII_filter(largevbox_d, DIM, ZERO, smoothR, block=block_size, grid=stitch_grid_size)\n\t\t\tprint hvbox_large.shape, largevbox_d.shape\n\t\t\thvbox_large[:, :, meta_z*N:(meta_z+1)*N] = largevbox_d.get_async()\n\t\thvbox_large = fft_stitch(N, plan2d, plan1d, hvbox_large, largevbox_d).real\n\t\tfor meta_z in xrange(META_GRID_SIZE):\n\t\t\tlargevbox_d = 
gpuarray.to_gpu_async(hvbox_large[:, :, meta_z*N:(meta_z+1)*N].copy())\n\t\t\tsubsample_kernel(largevbox_d.real, smallvbox_d, DIM, HII_DIM,PIXEL_FACTOR, block=block_size, grid=HII_stitch_grid_size)\n\t\t\thvbox_small[:, :, meta_z*M:(meta_z+1)*M] = smallvbox_d.get_async()\n\t\tnp.save(parent_folder+\"/Boxes/v{0}overddot_{1:d}_{2:.0f}Mpc\".format(mode, HII_DIM, BOX_LEN), smallvbox_d.get())\n\n\treturn",
"def pretrain_bm_net(self, n_epochs=25):\n optimizer = torch.optim.Adam(self.model.bm_net.parameters(), lr = self.lr*5)\n if self.bmloss_type == 'mse':\n criterion = MSELoss()\n elif self.bmloss_type == 'cos':\n criterion = CosineEmbeddingLoss()\n self.model.bm_net.train()\n self.model.bm_net.to(self.device)\n for epoch in range(n_epochs):\n self.model.bm_net.zero_grad()\n optimizer.zero_grad()\n cur_loss = []\n for batch_idx, (uids, feats, _, feats_len) in enumerate(self.model.loader):\n feats = feats.to(self.device).float()\n loss = 0\n out, out_len = self.model.bm_net(feats, feats_len)\n for idx in np.arange(len(out_len)):\n if self.bmloss_type == 'cos':\n # loss += criterion(out[idx, :out_len[idx]-1, :], feats[idx, 1:out_len[idx], :], torch.cuda.LongTensor([1]))\n loss += criterion(out[idx, :out_len[idx]-1, :], feats[idx, 1:out_len[idx], :], torch.LongTensor([1]).to(self.device))\n else:\n loss += criterion(out[idx, :out_len[idx]-1, :], feats[idx, 1:out_len[idx], :])\n # print('--------')\n # print(torch.isnan(out[idx, :out_len[idx]-1, :]).sum(), torch.isnan(feats[idx, :out_len[idx]-1, :]).sum())\n # print(torch.isnan(out).sum(), torch.isnan(feats).sum())\n # print(loss)\n loss.backward()\n cur_loss.append(loss.item())\n nn.utils.clip_grad_norm_(self.model.bm_net.parameters(), 5)\n optimizer.step()\n optimizer.zero_grad()\n self.model.bm_net.zero_grad()\n self.logger.info(f'BM Module pretrain, Epoch {epoch+1}/{n_epochs}: loss {round(np.mean(cur_loss), 8)}')",
"def temp_ann(S_SHSTA_0, S_SHSTA_1, S_SHSTA_2, S_SHSTA_3, I_SHSTA_0, I_SHSTA_1,\n I_SHSTA_2, I_SHSTA_3, C_KSWCK_0, C_KSWCK_1, C_KSWCK_2, C_KSWCK_3):\n # Construct input array.\n x = np.array([S_SHSTA_0, S_SHSTA_1, S_SHSTA_2, S_SHSTA_3,\n I_SHSTA_0, I_SHSTA_1, I_SHSTA_2, I_SHSTA_3,\n C_KSWCK_0, C_KSWCK_1, C_KSWCK_2, C_KSWCK_3])\n # Pass through hidden layer 1.\n W1 = np.loadtxt('W1.txt')\n B1 = np.loadtxt('B1.txt')\n h1 = relu(np.dot(W1, x) + B1)\n # Pass through hidden layer 2.\n W2 = np.loadtxt('W2.txt')\n B2 = np.loadtxt('B2.txt')\n h2 = relu(np.dot(W2, h1) + B2)\n # Pass through hidden layer 3.\n W3 = np.loadtxt('W3.txt')\n B3 = np.loadtxt('B3.txt')\n h3 = relu(np.dot(W3, h2) + B3)\n # Pass through hidden layer 4.\n W4 = np.loadtxt('W4.txt')\n B4 = np.loadtxt('B4.txt')\n h4 = relu(np.dot(W4, h3) + B4)\n # Pass through output layer.\n WO = np.loadtxt('WO.txt')\n BO = np.loadtxt('BO.txt')\n y = relu(np.dot(WO, h4) + BO)\n print(y)\n # Return result.\n return y",
"def fun_n_to_one_big(self, block_index, block_dim, nc1_size):\n ub_output_tmp = self.tik_instance.Tensor(\n \"float32\", (4, self.c_block_size), name=\"ub_output_tmp\",\n scope=tik.scope_ubuf)\n ub_output = self.tik_instance.Tensor(\n \"float32\", (1, self.c_block_size), name=\"ub_output\",\n scope=tik.scope_ubuf)\n ub_input = self.tik_instance.Tensor(\n \"float32\", (240*4, self.c_block_size), name=\"ub_input\",\n scope=tik.scope_ubuf)\n input_num = _ceil_div(self.in_size_h*self.in_size_w*16, 240*64)\n if input_num > 1:\n thread_num = 2\n else:\n thread_num = 1\n\n nc1 = self.batch_size*self.c1_size\n with self.tik_instance.if_scope(block_index != block_dim - 1):\n with self.tik_instance.for_range(0, nc1_size) as nc1_index:\n self.tik_instance.vector_dup(MASK, ub_output_tmp, 0.0, 1, 1, 8)\n self.tik_instance.vector_dup(16, ub_output, 0.0, 1, 1, 8)\n with self.tik_instance.for_range(\n 0, input_num, thread_num=thread_num) as input_index:\n with self.tik_instance.if_scope(\n input_index != input_num - 1):\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, 8*240, 0, 0)\n self.tik_instance.vadd(MASK, ub_output_tmp[0],\n ub_input[0], ub_output_tmp[0],\n 240, 1, 1, 1, 0, 8, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, (self.in_size_h*self.in_size_w -\n input_index*4*240)*2, 0, 0)\n with self.tik_instance.for_range(\n 0, self.in_size_h*self.in_size_w -\n input_index*4*240) as tmp_index:\n self.tik_instance.vadd(16, ub_output[0],\n ub_input[tmp_index*16],\n ub_output[0], 1, 1,\n 1, 1, 0, 2, 0)\n self.tik_instance.vadd(16, ub_output[0],\n ub_output_tmp[0],\n ub_output[0],\n 4, 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size + nc1_index)*16],\n ub_output[0], 0, 1, 2, 0, 0)\n with self.tik_instance.else_scope():\n with self.tik_instance.for_range(0, nc1 - (block_dim - 1)*nc1_size)\\\n as nc1_index:\n self.tik_instance.vector_dup(MASK, ub_output_tmp, 0.0, 1, 1, 8)\n self.tik_instance.vector_dup(16, ub_output, 0.0, 1, 1, 8)\n with self.tik_instance.for_range(\n 0, input_num, thread_num=thread_num) as input_index:\n with self.tik_instance.if_scope(\n input_index != input_num - 1):\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, 8*240, 0, 0)\n self.tik_instance.vadd(MASK, ub_output_tmp[0],\n ub_input[0], ub_output_tmp[0],\n 240, 1, 1, 1, 0, 8, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, (self.in_size_h*self.in_size_w -\n input_index*4*240)*2, 0, 0)\n with self.tik_instance.for_range(\n 0, self.in_size_h*self.in_size_w -\n input_index*4*240) as tmp_index:\n self.tik_instance.vadd(16, ub_output[0],\n ub_input[tmp_index*16],\n ub_output[0], 1, 1,\n 1, 1, 0, 8, 0)\n self.tik_instance.vadd(16, ub_output[0],\n ub_output_tmp[0],\n ub_output[0],\n 4, 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size + nc1_index)*16],\n ub_output[0], 0, 1, 2, 0, 0)",
"def _make_conv_layers_bn_padding(self): ## 20 Convs, used for pretrained by IMAGE Net 1000 class\n conv = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=3, bias=False), # padding=3 so, output is 224.\n nn.BatchNorm2d(64),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n nn.Conv2d(64, 192, 3, padding=1, bias=False),\n nn.BatchNorm2d(192),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(192, 128, 1, bias=False), ## kernel size = 1 이므로 padding = 0(defalut)\n nn.BatchNorm2d(128),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(128, 256, 3, padding=1, bias=False), \n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1, bias=False), \n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 512, 1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(1024, 512, 1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(1024, 512, 1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True)\n )\n return conv",
"def __init__(self, ):\n super().__init__()\n channels = 3\n\n # Initial convolution block\n out_features = 64\n # encoder\n self.input = nn.Sequential(\n nn.ReflectionPad2d(channels),\n nn.Conv2d(3, out_features, (7, 7)),\n nn.InstanceNorm2d(out_features),\n nn.MaxPool2d(2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(out_features, out_features * 2, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n nn.MaxPool2d(2),\n nn.Conv2d(out_features * 2, out_features * 4, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n )\n\n self.in0 = nn.InstanceNorm2d(256)\n self.block0 = blocks()\n self.block1 = blocks()\n self.block2 = blocks()\n self.block3 = blocks()\n self.block4 = blocks()\n self.block5 = blocks()\n self.block6 = blocks()\n self.block7 = blocks()\n\n self.out = nn.Sequential(\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 4, out_features * 2, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 2, out_features, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.ReflectionPad2d(channels),\n nn.Conv2d(out_features, channels, 7),\n nn.Tanh(),\n )",
"def learn_test(data, radio=0, nmk=0, maxif=0, minif=0, num_board=1, verbose=0):\r\n train = data\r\n nmNk = nmk*1024\r\n print('nmk, nmNk', nmk, nmNk)\r\n if verbose >= 1:\r\n print (\"\\n\\n\\n\\n\\ntest_minimal_01\")\r\n\r\n network = cm1k.CM1KEmulator(network_size=num_board * 1024)\r\n assert(not network.euclidean_norm)\r\n\r\n network.write_maxif(maxif)\r\n network.write_minif(minif)\r\n read_neuron_count=[0]\r\n # Train network(RBF Learning)\r\n if radio==1:\r\n\r\n l=len(train)\r\n iteration=0\r\n ID=0\r\n for i in range (0,2):\r\n if ID != l:#&(read_neuron_count[iteration] <= nmNk) :\r\n for input in train:\r\n input_comps = [int(x) for x in input]\r\n context = input_comps[0]\r\n cat = input_comps[1]\r\n pattern = input_comps[2:]\r\n network.learn(pattern, cat, context)\r\n read_neuron_count.append(network.read_ncount())\r\n ID, UNC_c, UNC_i, UNK,total_detail, cla_result, null = classify(train, network, radio=5, spinvalue=3, num_board=1, verbose=1)\r\n iteration+=1\r\n # print(network.read_ncount())\r\n\r\n else :\r\n break\r\n #assert(network.read_ncount() == 3)\r\n # print('iteration', iteration)\r\n # print('maxif', maxif)\r\n # print('minif', minif)\r\n #print('network.register_legend[NSR] : ',network.register_legend['NSR'])\r\n # Write all sampels\r\n elif radio == 9:\r\n iteration = 0\r\n for input in train:\r\n # if verbose >= 1:\r\n # print \"================================================================================\"\r\n input_comps = [int(x) for x in input]\r\n context = input_comps[0]\r\n cat = input_comps[1]\r\n pattern = input_comps[2:]\r\n network.learn_write_all(pattern, cat, context)\r\n # print(network.read_ncount())\r\n #elif radio==2: #Deep RBF\r\n return network, iteration, read_neuron_count",
"def mgcNetArchSkipMini(outLayer, l2_val, **kwargs):\n\n def_vals = {\"input_img_rows\" : 72,\n \"input_img_cols\" : 72,\n \"channels\" : 1,\n \"nb_classes\" : 13\n } # default parameters value\n\n for k, v in def_vals.items():\n kwargs.setdefault(k, v)\n\n input_img_rows = kwargs['input_img_rows']\n input_img_cols = kwargs['input_img_cols']\n channels = kwargs['channels']\n nb_classes = kwargs['nb_classes']\n\n \n # Input: 72 x 72 x 1\n img_shape = layers.Input(shape = (input_img_rows, input_img_cols, channels))\n\n # Layer 1\n #------------------------\n conv1 = layers.Conv2D(filters=8, kernel_size=(2, 2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(img_shape)\n conv1 = layers.Activation('relu')(conv1)\n conv1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)\n conv1 = layers.Dropout(0.4)(conv1)\n\n # Layer 2\n #------------------------\n conv2 = layers.Conv2D(filters=16, kernel_size=(2,2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(conv1)\n conv2 = layers.Activation('relu')(conv2) \n conv2 = layers.Conv2D(filters=64, kernel_size=(2,2), padding='same', activation='relu', strides = 2)(conv2)\n conv2 = layers.Dropout(0.4)(conv2)\n\n # Layer 3\n #------------------------\n conv3 = layers.Conv2D(filters=16, kernel_size=(2,2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(conv2)\n conv3 = layers.Activation('relu')(conv3) \n conv3 = layers.Conv2D(filters=16, kernel_size=(2,2), padding='same', activation='relu', strides = 2)(conv3)\n conv3 = layers.Dropout(0.4)(conv3)\n \n # skip connect 1\n #shortcut_layer = layers.Conv2D(filters=64, kernel_size=(1,1), padding='same', activation='relu', strides = 4)(conv1)\n shortcut_layer = layers.Conv2D(filters=16, kernel_size=(1,1), padding='same', activation='relu', strides = 8)(img_shape)\n \n conv3 = layers.add([shortcut_layer, conv3])\n #conv3 = layers.Concatenate()([shortcut_layer,conv3]) \n\n # Layer 4\n #------------------------\n conv4 = layers.Conv2D(filters=32, kernel_size=(2,2), padding='same', dilation_rate = (2, 2), kernel_regularizer=regularizers.l2(l2_val))(conv3)\n conv4 = layers.Activation('relu')(conv4)\n conv4 = layers.Conv2D(filters=16, kernel_size=(2,2), padding='same', activation='relu', strides = 2)(conv4)\n conv4 = layers.Dropout(0.4)(conv4)\n\n # Layer 5\n #------------------------\n output = layers.Conv2D(filters=32, kernel_size=(2,2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(conv3) # skip layer 4\n output = layers.Activation('relu')(output)\n output = layers.Conv2D(filters=32, kernel_size=(2,2), padding='same', activation='relu', strides = 2)(output) \n output = layers.Dropout(0.4)(output)\n \n # skip connect 2\n shortcut_layer2 = layers.Conv2D(filters=32, kernel_size=(1,1), padding='same', activation='relu', strides = 2)(conv3)\n output = layers.add([shortcut_layer2, output])\n \n # FC Layer\n #------------------------\n outputmlp = layers.Flatten()(output)\n outputmlp = layers.Dense(32, activation = 'relu')(outputmlp)\n outputmlp = layers.Dropout(0.5)(outputmlp)\n\n predictionsMlp = layers.Dense(nb_classes, activation='softmax')(outputmlp)\n \n \n # global averaging\n weight_decay=1E-4\n concat_axis = 1\n \n x = BatchNormalization(axis=concat_axis,\n gamma_regularizer=regularizers.l2(weight_decay),\n beta_regularizer=regularizers.l2(weight_decay))(output)\n x = Activation('relu')(x)\n x = layers.Dropout(0.4)(x)\n x = GlobalAveragePooling2D(data_format=K.image_data_format())(x)\n \n predictionsGloAvg = layers.Dense(nb_classes,\n activation='softmax',\n 
kernel_regularizer=regularizers.l2(weight_decay),\n bias_regularizer=regularizers.l2(weight_decay))(x)\n \n if outLayer == \"gloAvg\":\n predictions = predictionsGloAvg\n elif outLayer == \"mlp\":\n predictions = predictionsMlp\n \n # prediction model\n model = Model(img_shape, predictions, name = 'skipconnect')\n\n return model",
"def SSRNBlock(Yhat,c,Fo,scope='SSRNBlock',reuse=None):\n\n with tf.variable_scope(scope,reuse=reuse):\n conv_params = {\"filters\":c,\"kernel_size\":1,\"dilation_rate\":1,\"padding\":'same'} \n deconv_params = {\"filters\":c,\"kernel_size\":2,\"strides\":2,\"padding\":'same'} \n with tf.variable_scope('C_layer1', reuse): # conv 1\n L1 = conv1d(Yhat,**conv_params)\n with tf.variable_scope('HC_block1', reuse): # hc block 1\n L2_1 = highway_activation_conv(L1,kernel_size=3,padding='same',scope='HC1')\n L2_2 = highway_activation_conv(L2_1,kernel_size=3,dilation_rate=3,padding='same',scope='HC2')\n with tf.variable_scope('D_block1', reuse): # deconv block 1\n L3_1 = conv1d_transpose(L2_2,**deconv_params)\n L3_2 = highway_activation_conv(L3_1,kernel_size=3,padding='same',scope='HC1') \n L3_3 = highway_activation_conv(L3_2,kernel_size=3,dilation_rate=3,padding='same',scope='HC2') \n with tf.variable_scope('D_block2', reuse): # deconv block 2\n L4_1 = conv1d_transpose(L3_3,**deconv_params)\n L4_2 = highway_activation_conv(L4_1,kernel_size=3,padding='same',scope='HC1') \n L4_3 = highway_activation_conv(L4_2,kernel_size=3,dilation_rate=3,padding='same',scope='HC2')\n with tf.variable_scope('C_layer2', reuse): # conv 2\n conv_params[\"filters\"] = 2*c\n L5 = conv1d(L4_3,**conv_params) \n with tf.variable_scope('HC_block2', reuse): # hc block 2\n L6_1 = highway_activation_conv(L5,kernel_size=3,padding='same',scope='HC1') \n L6_2 = highway_activation_conv(L6_1,kernel_size=3,padding='same',scope='HC2') \n with tf.variable_scope('C_layer3', reuse): # conv block 3\n conv_params[\"filters\"] = Fo \n L7_1 = conv1d(L6_2,**conv_params) \n L7_2 = tf.nn.relu(conv1d(L7_1,**conv_params))\n L7_3 = tf.nn.relu(conv1d(L7_2,**conv_params))\n Zlogit = conv1d(L7_3,**conv_params)\n Zhat = tf.nn.sigmoid(Zlogit) # sigmoid output layer\n \n return Zlogit, Zhat",
"def __init__(self, id, node_type=NodeType.HIDDEN, activation=F.relu, layer_type=nn.Conv2d,\n conv_window_size=3, conv_stride=1, max_pool_size=2):\n\n super(ModuleNEATNode, self).__init__(id, node_type)\n\n batch_norm_chance = 0.65 # chance that a new node will start with batch norm\n use_batch_norm = random.random() < batch_norm_chance\n\n dropout_chance = 0.2 # chance that a new node will start with drop out\n use_dropout = random.random() < dropout_chance\n\n max_pool_chance = 0.3 # chance that a new node will start with drop out\n use_max_pool = random.random() < max_pool_chance\n\n self.activation = Mutagen(F.relu, F.leaky_relu, torch.sigmoid, F.relu6,\n discreet_value=activation, name=\"activation function\",\n mutation_chance=0.15) # TODO try add in Selu, Elu\n\n conv_out_features = 25 + random.randint(0, 25)\n linear_out_features = 100 + random.randint(0, 100)\n\n linear_submutagens = \\\n {\n \"regularisation\": Mutagen(None, nn.BatchNorm1d,\n discreet_value=nn.BatchNorm1d if use_batch_norm else None,\n mutation_chance=0.15),\n\n \"dropout\": Mutagen(None, nn.Dropout, discreet_value=nn.Dropout if use_dropout else None, sub_mutagens=\n {\n nn.Dropout: {\n \"dropout_factor\": Mutagen(value_type=ValueType.CONTINUOUS, current_value=0.15, start_range=0,\n end_range=0.75)}\n }, mutation_chance=0.08),\n\n \"out_features\": Mutagen(value_type=ValueType.WHOLE_NUMBERS, current_value=linear_out_features,\n start_range=10,\n end_range=1024, name=\"num out features\", mutation_chance=0.22,\n distance_weighting=Props.LAYER_SIZE_COEFFICIENT if Config.allow_attribute_distance else 0)\n }\n\n conv_submutagens = {\n \"conv_window_size\": Mutagen(3, 5, 7, discreet_value=conv_window_size, mutation_chance=0.13),\n\n \"conv_stride\": Mutagen(value_type=ValueType.WHOLE_NUMBERS, current_value=conv_stride, start_range=1,\n end_range=5),\n\n \"reduction\": Mutagen(None, nn.MaxPool2d, discreet_value=nn.MaxPool2d if use_max_pool else None,\n sub_mutagens=\n {\n nn.MaxPool2d: {\"pool_size\": Mutagen(\n value_type=ValueType.WHOLE_NUMBERS, current_value=max_pool_size, start_range=2,\n end_range=5)}\n }, mutation_chance=0.15),\n\n \"regularisation\": Mutagen(None, nn.BatchNorm2d, discreet_value=nn.BatchNorm2d if use_batch_norm else None,\n mutation_chance=0.15),\n\n \"dropout\": Mutagen(None, nn.Dropout2d, discreet_value=nn.Dropout2d if use_dropout else None, sub_mutagens=\n {\n nn.Dropout2d: {\n \"dropout_factor\": Mutagen(value_type=ValueType.CONTINUOUS, current_value=0.1,\n start_range=0, end_range=0.75)}\n }, mutation_chance=0.08),\n\n \"out_features\": Mutagen(value_type=ValueType.WHOLE_NUMBERS, current_value=conv_out_features, start_range=1,\n end_range=100, name=\"num out features\", mutation_chance=0.22,\n distance_weighting=Props.LAYER_SIZE_COEFFICIENT if Config.allow_attribute_distance else 0)\n }\n\n if use_linears and not use_convs:\n self.layer_type = Mutagen(nn.Linear, discreet_value=nn.Linear,\n distance_weighting=Props.LAYER_TYPE_COEFFICIENT if Config.allow_attribute_distance else 0,\n sub_mutagens={nn.Linear: linear_submutagens}\n )\n if use_convs and not use_linears:\n self.layer_type = Mutagen(nn.Conv2d, discreet_value=nn.Conv2d,\n distance_weighting=Props.LAYER_TYPE_COEFFICIENT if Config.allow_attribute_distance else 0,\n sub_mutagens={nn.Conv2d: conv_submutagens})\n if use_convs and use_linears:\n self.layer_type = Mutagen(nn.Conv2d, nn.Linear, discreet_value=layer_type,\n distance_weighting=Props.LAYER_TYPE_COEFFICIENT if Config.allow_attribute_distance else 0,\n sub_mutagens={\n nn.Conv2d: 
conv_submutagens,\n nn.Linear: linear_submutagens\n }, name=\"deep layer type\", mutation_chance=0.08)",
"def identity_block(inputs, kernel_size, filters, stage, block,\n training):\n nb_filter1, nb_filter2, nb_filter3 = filters\n\n scope_name = 'conv'+str(stage)+block+'_branch'\n scope_name = 'bn'+str(stage)+block+'_branch'\n\n x = slim.conv2d(inputs,nb_filter1,[1,1],stride=[1,1],padding='VALID',activation_fn=None,\n trainable=training,scope=scope_name+'2a')\n x = slim.batch_norm(x,scope=scope_name+'2a',is_training=training)\n x = tf.nn.relu(x)\n\n\n x = slim.conv2d(x,nb_filter2,[kernel_size,kernel_size],stride=[1,1],padding='SAME',\n activation_fn=None,trainable=training,scope=scope_name+'2b')\n x = slim.batch_norm(x,scope=scope_name+'2b',is_training=training)\n x = tf.nn.relu(x)\n\n\n x = slim.conv2d(x,nb_filter3,[1,1],stride=[1,1],padding='VALID',\n activation_fn=None,trainable=training,scope=scope_name+'2c')\n x = slim.batch_norm(x,scope=scope_name+'2c',is_training=training)\n\n \n x = tf.add(x,inputs)\n x = tf.nn.relu(x,name='res'+str(stage)+block+\"_out\")\n return x",
"def build_resnet152(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128) \n temp = res3a_feats\n for i in range(1, 8):\n temp = self.basic_block2(temp, 'res3b'+str(i), 'bn3b'+str(i), is_train, use_batch_norm, 128)\n res3b7_feats = temp\n \n res4a_feats = self.basic_block(res3b7_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n temp = res4a_feats\n for i in range(1, 36):\n temp = self.basic_block2(temp, 'res4b'+str(i), 'bn4b'+str(i), is_train, use_batch_norm, 256)\n res4b35_feats = temp\n\n res5a_feats = self.basic_block(res4b35_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train",
"def mnasneta1_3x3mbconv3(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)"
] | [
"0.62559426",
"0.61565256",
"0.61362463",
"0.6130783",
"0.6099492",
"0.60563445",
"0.60303664",
"0.60301816",
"0.59409606",
"0.5915859",
"0.58603007",
"0.58568764",
"0.5853717",
"0.5841832",
"0.5811412",
"0.5799647",
"0.5791486",
"0.5767362",
"0.57564116",
"0.5756015",
"0.57361245",
"0.5735948",
"0.57330716",
"0.5694327",
"0.5681914",
"0.56805724",
"0.567513",
"0.5671407",
"0.5671082",
"0.5669593"
] | 0.6361786 | 0 |
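A hedged usage sketch for the MnasNet-A1 variant factories that appear in the surrounding rows: each factory (e.g. mnasneta1_5x5mbconv6 in the next row, mnasneta1_3x3mbconv3 in the previous one) only fixes the per-stage expansion ratios, kernel sizes and squeeze-and-excitation flags before delegating to a shared _mnasnet builder. The module path `mnasnet_variants` below is a hypothetical stand-in for wherever these factories live, and a working PyTorch install is assumed; this is an illustration, not part of the dataset.

    # Sketch only: `mnasnet_variants` is a hypothetical module name standing in for
    # the file that defines the factories shown in these rows; torch is assumed installed.
    import torch
    from mnasnet_variants import mnasneta1_5x5mbconv6

    model = mnasneta1_5x5mbconv6(pretrained=False)   # all six stages: 5x5 MBConv, expansion 6, no SE
    model.eval()

    x = torch.randn(1, 3, 224, 224)                  # ImageNet-sized dummy input
    with torch.no_grad():
        logits = model(x)
    print(logits.shape)                              # e.g. torch.Size([1, 1000]) for a 1000-class head
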
mnasneta1 with 5x5 MBConv6 blocks only | def mnasneta1_5x5mbconv6(pretrained=False, progress=False, **kwargs):
return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],
kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[False, False, False, False, False, False],
dropout=0, pretrained=pretrained, progress=progress, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mnasneta1_5x5mbconv6se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def Unet4(shape, nb_filters=32, exp=1, kernel_size=3, initialization=\"glorot_uniform\", activation=\"relu\", sigma_noise=0, output_channels=1, drop=0.0, regularization=None):\n \n \n input_layer = Input(shape=shape)\n\n conv1 = ConvBlock(input_layer, nb_filters=nb_filters, kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n if drop > 0.0: pool1 = Dropout(drop)(pool1)\n\n conv2 = ConvBlock(pool1, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n if drop > 0.0: pool2 = Dropout(drop)(pool2)\n\n conv3 = ConvBlock(pool2, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n if drop > 0.0: pool3 = Dropout(drop)(pool3)\n\n conv4 = ConvBlock(pool3, nb_filters=nb_filters * 2 **(3 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n if drop > 0.0: pool4 = Dropout(drop)(pool4)\n\n deconv5 = DeconvBlock(conv4, residual=conv3, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv5 = Dropout(drop)(deconv5)\n\n deconv6 = DeconvBlock(deconv5, residual=conv2, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv6 = Dropout(drop)(deconv6)\n\n deconv7 = DeconvBlock(deconv6, residual=conv1, nb_filters=nb_filters, kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv7 = Dropout(drop)(deconv7)\n\n if sigma_noise > 0:\n deconv7 = GaussianNoise(sigma_noise)(deconv7)\n\n output_layer = Conv2D(filters=output_channels, kernel_size=(1, 1))(deconv7)\n output_layer = BatchNormalization()(output_layer)\n output_layer = Activation('softmax')(output_layer)\n\n model = Model(inputs=input_layer, outputs=output_layer, name='Unet')\n return model",
"def mnasneta1_3x3mbconv6se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def mnasneta1_3x3mbconv6(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def mnasneta1_5x5mbconv3se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def mnasneta1_5x5mbconv3(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def inception_block_1a(X):\n\tX_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name ='inception_3a_3x3_conv1')(X)\n\tX_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name = 'inception_3a_3x3_bn1')(X_3x3)\n\tX_3x3 = Activation('relu')(X_3x3)\n\tX_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)\n\tX_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3a_3x3_conv2')(X_3x3)\n\tX_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn2')(X_3x3)\n\tX_3x3 = Activation('relu')(X_3x3)\n\tX_5x5 = Conv2D(16, (1, 1), data_format='channels_first', name='inception_3a_5x5_conv1')(X)\n\tX_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn1')(X_5x5)\n\tX_5x5 = Activation('relu')(X_5x5)\n\tX_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)\n\tX_5x5 = Conv2D(32, (5, 5), data_format='channels_first', name='inception_3a_5x5_conv2')(X_5x5)\n\tX_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn2')(X_5x5)\n\tX_5x5 = Activation('relu')(X_5x5)\n\tX_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)\n\tX_pool = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3a_pool_conv')(X_pool)\n\tX_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_pool_bn')(X_pool)\n\tX_pool = Activation('relu')(X_pool)\n\tX_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool)\n\tX_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3a_1x1_conv')(X)\n\tX_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_1x1_bn')(X_1x1)\n\tX_1x1 = Activation('relu')(X_1x1)\n\t# CONCAT\n\tinception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)\n\treturn inception",
"def learn_test(data, radio=0, nmk=0, maxif=0, minif=0, num_board=1, verbose=0):\r\n train = data\r\n nmNk = nmk*1024\r\n print('nmk, nmNk', nmk, nmNk)\r\n if verbose >= 1:\r\n print (\"\\n\\n\\n\\n\\ntest_minimal_01\")\r\n\r\n network = cm1k.CM1KEmulator(network_size=num_board * 1024)\r\n assert(not network.euclidean_norm)\r\n\r\n network.write_maxif(maxif)\r\n network.write_minif(minif)\r\n read_neuron_count=[0]\r\n # Train network(RBF Learning)\r\n if radio==1:\r\n\r\n l=len(train)\r\n iteration=0\r\n ID=0\r\n for i in range (0,2):\r\n if ID != l:#&(read_neuron_count[iteration] <= nmNk) :\r\n for input in train:\r\n input_comps = [int(x) for x in input]\r\n context = input_comps[0]\r\n cat = input_comps[1]\r\n pattern = input_comps[2:]\r\n network.learn(pattern, cat, context)\r\n read_neuron_count.append(network.read_ncount())\r\n ID, UNC_c, UNC_i, UNK,total_detail, cla_result, null = classify(train, network, radio=5, spinvalue=3, num_board=1, verbose=1)\r\n iteration+=1\r\n # print(network.read_ncount())\r\n\r\n else :\r\n break\r\n #assert(network.read_ncount() == 3)\r\n # print('iteration', iteration)\r\n # print('maxif', maxif)\r\n # print('minif', minif)\r\n #print('network.register_legend[NSR] : ',network.register_legend['NSR'])\r\n # Write all sampels\r\n elif radio == 9:\r\n iteration = 0\r\n for input in train:\r\n # if verbose >= 1:\r\n # print \"================================================================================\"\r\n input_comps = [int(x) for x in input]\r\n context = input_comps[0]\r\n cat = input_comps[1]\r\n pattern = input_comps[2:]\r\n network.learn_write_all(pattern, cat, context)\r\n # print(network.read_ncount())\r\n #elif radio==2: #Deep RBF\r\n return network, iteration, read_neuron_count",
"def inception_block_1a(X):\n\n X_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name='inception_3a_3x3_conv1')(X)\n X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn1')(X_3x3)\n X_3x3 = Activation('relu')(X_3x3)\n X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)\n X_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3a_3x3_conv2')(X_3x3)\n X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn2')(X_3x3)\n X_3x3 = Activation('relu')(X_3x3)\n\n X_5x5 = Conv2D(16, (1, 1), data_format='channels_first', name='inception_3a_5x5_conv1')(X)\n X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn1')(X_5x5)\n X_5x5 = Activation('relu')(X_5x5)\n X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)\n X_5x5 = Conv2D(32, (5, 5), data_format='channels_first', name='inception_3a_5x5_conv2')(X_5x5)\n X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn2')(X_5x5)\n X_5x5 = Activation('relu')(X_5x5)\n\n X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)\n X_pool = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3a_pool_conv')(X_pool)\n X_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_pool_bn')(X_pool)\n X_pool = Activation('relu')(X_pool)\n X_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool)\n\n X_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3a_1x1_conv')(X)\n X_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_1x1_bn')(X_1x1)\n X_1x1 = Activation('relu')(X_1x1)\n\n # CONCAT\n inception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)\n\n return inception",
"def discriminator_block_conditionnal(self, name):\n \n if self.fit_mask : \n \n im = Input(shape=(2+self.nb_classe_mask, self.image_row, self.image_column, self.image_depth), name='dis_input')\n \n else :\n # In:\n im = Input(shape=(2, self.image_row, self.image_column, self.image_depth), name='dis_input')\n\n\n res = Input(shape=(1, self.image_row, self.image_column, self.image_depth), name='dis_input_res')\n\n inputs = Concatenate(axis=-4)([im, res])\n\n # Input 64\n disnet = Conv3D(self.discriminator_kernel * 1, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_1')(inputs)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 1 : 32\n disnet = Conv3D(self.discriminator_kernel * 2, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_2')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 2 : 16\n disnet = Conv3D(self.discriminator_kernel * 4, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_3')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 3 : 8\n disnet = Conv3D(self.discriminator_kernel * 8, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_4')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 4 : 4\n disnet = Conv3D(self.discriminator_kernel * 16, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_5')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n\n \n decision = Conv3D(1, 2, strides=1,\n use_bias=False,\n kernel_initializer='he_normal',\n data_format='channels_first',\n name='dis_decision')(disnet)\n\n decision = Reshape((1,))(decision)\n\n model = Model(inputs=[im, res], outputs=[decision], name=name)\n\n return model",
"def build_bisenet(inputs, num_classes):\n\n ### The spatial path\n ### The number of feature maps for each convolution is not specified in the paper\n ### It was chosen here to be equal to the number of feature maps of a classification\n ### model at each corresponding stage\n # spatial_net = fluid.layers.resize_bilinear(inputs, [Image_Height/8, Image_Width/8])\n # print('spatial_net_1',spatial_net)\n\n ## spatial path\n spatial_net = ConvBlock(inputs, num_filters=64, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=128, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=256, kernel_size=3, stride=2)\n # print(\"spatial_net:\", spatial_net)\n\n # spatial_net = fluid.layers.resize_bilinear(spatial_net, [Image_Height/8, Image_Width/8])\n # print('spatial_net_2',spatial_net)\n ### Context path\n model = ResNet(is_test=False)\n # spatial_net = model.bottleneck_block1(inputs)\n end_points_16, end_points_32 = model.net(inputs)\n net_4 = AttentionRefinementModule(end_points_16, num_filters=512)\n net_5 = AttentionRefinementModule(end_points_32, num_filters=1024)\n global_channels = fluid.layers.reduce_mean(net_5, [2, 3], keep_dim=True)\n net_5_scaled = fluid.layers.elementwise_mul(net_5, global_channels, axis=0)\n\n ### Combining the paths\n net_4 = Upsample(net_4, scale=2)\n net_5_scaled = Upsample(net_5_scaled, scale=4)\n # print('net_4, net_5:', [net_4, net_5_scaled])\n # layers_concat = list()\n # layers_concat.append(spatial_net)\n ## layers_concat.append(net_4)\n # layers_concat.append(net_5_scaled)\n context_net = fluid.layers.concat([spatial_net, net_4, net_5_scaled], axis=1) #\n # context_net = fluid.layers.concat(input=layers_concat,axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([net_4, net_5_scaled], axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([spatial_net,context_net], axis=1)\n # print('context_net2',context_net)\n\n ### FFM\n # net = FeatureFusionModule(input_1=spatial_net, input_2=context_net, num_filters=num_classes)\n net = FeatureFusionModule(inputs=context_net, num_filters=num_classes)\n\n # print('net', net)\n\n ## [batch_zize, num_filters, 128, 64]\n\n ### Final upscaling and finish\n # net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 128])\n # print('conv2d_transpose', net)\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn1')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[128, 256])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn2')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 512])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn3')\n #net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[512, 1024])\n #net = batch_normalization(net, relu=True, name='conv2d_transpose_bn4')\n # print('net',net)\n net = fluid.layers.image_resize(net, out_shape=[512, 1024], resample='BILINEAR')\n\n net = fluid.layers.conv2d(net, num_classes, 1)\n return net",
"def mobilenetv2 (inputs, k, alpha = 1.0, train_bn = False):\n\n x = conv_block(inputs, 32, alpha, (3, 3), strides=(2, 2), block_id=0, train_bn=train_bn) # Input Res: 1\n\n x = inverted_residual_block(x, 16, (3, 3), t=1, strides=1, n=1, alpha=1.0, block_id=1, train_bn=train_bn)\t# Input Res: 1/2\n x = inverted_residual_block(x, 24, (3, 3), t=6, strides=2, n=2, alpha=1.0, block_id=2, train_bn=train_bn)\t# Input Res: 1/2\n x = inverted_residual_block(x, 32, (3, 3), t=6, strides=2, n=3, alpha=1.0, block_id=4, train_bn=train_bn)\t# Input Res: 1/4\n x = inverted_residual_block(x, 64, (3, 3), t=6, strides=2, n=4, alpha=1.0, block_id=7, train_bn=train_bn)\t# Input Res: 1/8\n x = inverted_residual_block(x, 96, (3, 3), t=6, strides=1, n=3, alpha=1.0, block_id=11, train_bn=train_bn)\t# Input Res: 1/8\n x = inverted_residual_block(x, 160, (3, 3), t=6, strides=2, n=3, alpha=1.0, block_id=14, train_bn=train_bn)\t# Input Res: 1/16\n x = inverted_residual_block(x, 320, (3, 3), t=6, strides=1, n=1, alpha=1.0, block_id=17, train_bn=train_bn)\t# Input Res: 1/32\n\n x = conv_block(x, 1280, alpha, (1, 1), strides=(1, 1), block_id=18, train_bn=train_bn) # Input Res: 1/32\n\n x = KL.GlobalAveragePooling2D()(x)\n x = KL.Reshape((1, 1, 1280))(x)\n x = KL.Dropout(0.3, name='Dropout')(x)\n x = KL.Conv2D(k, (1, 1), padding='same')(x)\n\n x = KL.Activation('softmax', name='softmax')(x)\n output = KL.Reshape((k,))(x)\n\n model = KM.Model(inputs, output)\n plot_model(model, to_file='MobileNetv2.png', show_shapes=True)\n\n return model",
"def _make_conv_layers_bn_padding(self): ## 20 Convs, used for pretrained by IMAGE Net 1000 class\n conv = nn.Sequential(\n nn.Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=2, padding=3, bias=False), # padding=3 so, output is 224.\n nn.BatchNorm2d(64),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(kernel_size=2, stride=2),\n\n nn.Conv2d(64, 192, 3, padding=1, bias=False),\n nn.BatchNorm2d(192),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(192, 128, 1, bias=False), ## kernel size = 1 이므로 padding = 0(defalut)\n nn.BatchNorm2d(128),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(128, 256, 3, padding=1, bias=False), \n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1, bias=False), \n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 256, 1, bias=False),\n nn.BatchNorm2d(256),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(256, 512, 3, padding=1, bias=False),\n nn.BatchNorm2d(512), \n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(512, 512, 1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True),\n nn.MaxPool2d(2,2),\n\n nn.Conv2d(1024, 512, 1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True),\n\n nn.Conv2d(1024, 512, 1, bias=False),\n nn.BatchNorm2d(512),\n nn.LeakyReLU(0.1, inplace=True),\n nn.Conv2d(512, 1024, 3, padding=1, bias=False),\n nn.BatchNorm2d(1024),\n nn.LeakyReLU(0.1, inplace=True)\n )\n return conv",
"def fun_n_to_one_small(self, block_index, block_dim, nc1_size):\n nc1 = self.batch_size*self.c1_size\n in_size_w_num = _ceil_div(self.in_size_w, 4)\n with self.tik_instance.if_scope(block_index != block_dim - 1):\n with self.tik_instance.for_range(0, nc1_size) as nc1_index:\n ub_input = self.tik_instance.Tensor(\n \"float32\", (self.in_size_h, self.in_size_w,\n self.c_block_size),\n name=\"ub_input\", scope=tik.scope_ubuf)\n self.tik_instance.data_move(\n ub_input[0], self.grads_gm[(block_index*nc1_size +\n nc1_index) * self.in_size_h *\n self.in_size_w*16],\n 0, 1, self.in_size_h*self.in_size_w*2, 0, 0)\n if in_size_w_num > 1:\n with self.tik_instance.for_range(0, in_size_w_num) \\\n as w_in_index:\n with self.tik_instance.if_scope(\n w_in_index != in_size_w_num - 1):\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n MASK, ub_input[w_in_index*64],\n ub_input[w_in_index*64 + self.in_size_w*16],\n ub_input[w_in_index*64], self.in_size_h-1,\n 1, 1, 1, 0, self.in_size_w*2, 0)\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64], 3,\n 1, 1, 1, 0, 2, 0)\n with self.tik_instance.else_scope():\n if self.in_size_h != 1:\n self.tik_instance.vadd((self.in_size_w - (\n in_size_w_num-1)*4)*16,\n ub_input[w_in_index*64],\n ub_input[w_in_index*64 +\n self.in_size_w *\n 16],\n ub_input[w_in_index*64],\n self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w *\n 2, 0)\n if self.in_size_w-(in_size_w_num-1)*4 > 1:\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64],\n self.in_size_w-(in_size_w_num-1)*4-1,\n 1, 1, 1, 0, 2, 0)\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[64], ub_input[0],\n in_size_w_num-1, 1, 1, 1, 0, 8, 0)\n else:\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n self.in_size_w*16, ub_input[0],\n ub_input[self.in_size_w*16],\n ub_input[0], self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w*2, 0)\n if self.in_size_w != 1:\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[16],\n ub_input[0], self.in_size_w-1,\n 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size+nc1_index)*16],\n ub_input[0], 0, 1, 2, 0, 0)\n with self.tik_instance.else_scope():\n with self.tik_instance.for_range(\n 0, nc1 - (block_dim - 1)*nc1_size) as nc1_index:\n ub_input = self.tik_instance.Tensor(\n \"float32\", (self.in_size_h, self.in_size_w,\n self.c_block_size),\n name=\"ub_output\", scope=tik.scope_ubuf)\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16],\n 0, 1, self.in_size_h*self.in_size_w*2, 0, 0)\n if in_size_w_num > 1:\n with self.tik_instance.for_range(0, in_size_w_num) \\\n as w_in_index:\n with self.tik_instance.if_scope(\n w_in_index != in_size_w_num - 1):\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n MASK, ub_input[w_in_index*64],\n ub_input[w_in_index*64 + self.in_size_w*16],\n ub_input[w_in_index*64], self.in_size_h-1,\n 1, 1, 1, 0, self.in_size_w*2, 0)\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64], 3, 1, 1, 1, 0, 2, 0)\n with self.tik_instance.else_scope():\n if self.in_size_h != 1:\n self.tik_instance.vadd((self.in_size_w-(\n in_size_w_num-1)*4)*16,\n ub_input[w_in_index*64],\n ub_input[w_in_index*64 +\n self.in_size_w *\n 16],\n ub_input[w_in_index*64],\n self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w *\n 2, 0)\n if self.in_size_w-(in_size_w_num-1)*4 > 1:\n 
self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64],\n self.in_size_w-(in_size_w_num-1)*4-1,\n 1, 1, 1, 0, 2, 0)\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[64], ub_input[0],\n in_size_w_num-1, 1, 1, 1, 0, 8, 0)\n else:\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n self.in_size_w*16, ub_input[0],\n ub_input[self.in_size_w*16],\n ub_input[0], self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w*2, 0)\n if self.in_size_w != 1:\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[16], ub_input[0],\n self.in_size_w-1, 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size+nc1_index)*16],\n ub_input[0], 0, 1, 2, 0, 0)",
"def discriminator_block(in_filters, out_filters):\n layers = [ nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1),\n nn.LeakyReLU(0.01)]\n return layers",
"def pretrain_bm_net(self, n_epochs=25):\n optimizer = torch.optim.Adam(self.model.bm_net.parameters(), lr = self.lr*5)\n if self.bmloss_type == 'mse':\n criterion = MSELoss()\n elif self.bmloss_type == 'cos':\n criterion = CosineEmbeddingLoss()\n self.model.bm_net.train()\n self.model.bm_net.to(self.device)\n for epoch in range(n_epochs):\n self.model.bm_net.zero_grad()\n optimizer.zero_grad()\n cur_loss = []\n for batch_idx, (uids, feats, _, feats_len) in enumerate(self.model.loader):\n feats = feats.to(self.device).float()\n loss = 0\n out, out_len = self.model.bm_net(feats, feats_len)\n for idx in np.arange(len(out_len)):\n if self.bmloss_type == 'cos':\n # loss += criterion(out[idx, :out_len[idx]-1, :], feats[idx, 1:out_len[idx], :], torch.cuda.LongTensor([1]))\n loss += criterion(out[idx, :out_len[idx]-1, :], feats[idx, 1:out_len[idx], :], torch.LongTensor([1]).to(self.device))\n else:\n loss += criterion(out[idx, :out_len[idx]-1, :], feats[idx, 1:out_len[idx], :])\n # print('--------')\n # print(torch.isnan(out[idx, :out_len[idx]-1, :]).sum(), torch.isnan(feats[idx, :out_len[idx]-1, :]).sum())\n # print(torch.isnan(out).sum(), torch.isnan(feats).sum())\n # print(loss)\n loss.backward()\n cur_loss.append(loss.item())\n nn.utils.clip_grad_norm_(self.model.bm_net.parameters(), 5)\n optimizer.step()\n optimizer.zero_grad()\n self.model.bm_net.zero_grad()\n self.logger.info(f'BM Module pretrain, Epoch {epoch+1}/{n_epochs}: loss {round(np.mean(cur_loss), 8)}')",
"def __init__(self, id, node_type=NodeType.HIDDEN, activation=F.relu, layer_type=nn.Conv2d,\n conv_window_size=3, conv_stride=1, max_pool_size=2):\n\n super(ModuleNEATNode, self).__init__(id, node_type)\n\n batch_norm_chance = 0.65 # chance that a new node will start with batch norm\n use_batch_norm = random.random() < batch_norm_chance\n\n dropout_chance = 0.2 # chance that a new node will start with drop out\n use_dropout = random.random() < dropout_chance\n\n max_pool_chance = 0.3 # chance that a new node will start with drop out\n use_max_pool = random.random() < max_pool_chance\n\n self.activation = Mutagen(F.relu, F.leaky_relu, torch.sigmoid, F.relu6,\n discreet_value=activation, name=\"activation function\",\n mutation_chance=0.15) # TODO try add in Selu, Elu\n\n conv_out_features = 25 + random.randint(0, 25)\n linear_out_features = 100 + random.randint(0, 100)\n\n linear_submutagens = \\\n {\n \"regularisation\": Mutagen(None, nn.BatchNorm1d,\n discreet_value=nn.BatchNorm1d if use_batch_norm else None,\n mutation_chance=0.15),\n\n \"dropout\": Mutagen(None, nn.Dropout, discreet_value=nn.Dropout if use_dropout else None, sub_mutagens=\n {\n nn.Dropout: {\n \"dropout_factor\": Mutagen(value_type=ValueType.CONTINUOUS, current_value=0.15, start_range=0,\n end_range=0.75)}\n }, mutation_chance=0.08),\n\n \"out_features\": Mutagen(value_type=ValueType.WHOLE_NUMBERS, current_value=linear_out_features,\n start_range=10,\n end_range=1024, name=\"num out features\", mutation_chance=0.22,\n distance_weighting=Props.LAYER_SIZE_COEFFICIENT if Config.allow_attribute_distance else 0)\n }\n\n conv_submutagens = {\n \"conv_window_size\": Mutagen(3, 5, 7, discreet_value=conv_window_size, mutation_chance=0.13),\n\n \"conv_stride\": Mutagen(value_type=ValueType.WHOLE_NUMBERS, current_value=conv_stride, start_range=1,\n end_range=5),\n\n \"reduction\": Mutagen(None, nn.MaxPool2d, discreet_value=nn.MaxPool2d if use_max_pool else None,\n sub_mutagens=\n {\n nn.MaxPool2d: {\"pool_size\": Mutagen(\n value_type=ValueType.WHOLE_NUMBERS, current_value=max_pool_size, start_range=2,\n end_range=5)}\n }, mutation_chance=0.15),\n\n \"regularisation\": Mutagen(None, nn.BatchNorm2d, discreet_value=nn.BatchNorm2d if use_batch_norm else None,\n mutation_chance=0.15),\n\n \"dropout\": Mutagen(None, nn.Dropout2d, discreet_value=nn.Dropout2d if use_dropout else None, sub_mutagens=\n {\n nn.Dropout2d: {\n \"dropout_factor\": Mutagen(value_type=ValueType.CONTINUOUS, current_value=0.1,\n start_range=0, end_range=0.75)}\n }, mutation_chance=0.08),\n\n \"out_features\": Mutagen(value_type=ValueType.WHOLE_NUMBERS, current_value=conv_out_features, start_range=1,\n end_range=100, name=\"num out features\", mutation_chance=0.22,\n distance_weighting=Props.LAYER_SIZE_COEFFICIENT if Config.allow_attribute_distance else 0)\n }\n\n if use_linears and not use_convs:\n self.layer_type = Mutagen(nn.Linear, discreet_value=nn.Linear,\n distance_weighting=Props.LAYER_TYPE_COEFFICIENT if Config.allow_attribute_distance else 0,\n sub_mutagens={nn.Linear: linear_submutagens}\n )\n if use_convs and not use_linears:\n self.layer_type = Mutagen(nn.Conv2d, discreet_value=nn.Conv2d,\n distance_weighting=Props.LAYER_TYPE_COEFFICIENT if Config.allow_attribute_distance else 0,\n sub_mutagens={nn.Conv2d: conv_submutagens})\n if use_convs and use_linears:\n self.layer_type = Mutagen(nn.Conv2d, nn.Linear, discreet_value=layer_type,\n distance_weighting=Props.LAYER_TYPE_COEFFICIENT if Config.allow_attribute_distance else 0,\n sub_mutagens={\n nn.Conv2d: 
conv_submutagens,\n nn.Linear: linear_submutagens\n }, name=\"deep layer type\", mutation_chance=0.08)",
"def train(trial_num, image_num, filter_num, filter_size, input_size, channel_num, pooling_rate, left_upper_padding, right_lower_padding):\n\n input_batch_num = 1\n batch_num = 2\n\n init_filters = np.array(np.random.normal(size=filter_num * channel_num *\n filter_size*filter_size), dtype=\"float32\")\n #init_filters = np.array([1.0] * filter_num * channel_num * filter_size * filter_size, dtype=\"float32\")\n init_filters = 0.01 * init_filters.reshape(filter_num, channel_num*filter_size*filter_size)\n\n init_hbias = np.array([-0.1] * filter_num, dtype=\"float32\").reshape(filter_num, 1)\n\n init_vbias = np.array([0.0] * channel_num, dtype=\"float32\").reshape(channel_num, 1)\n\n libnvcrbm = __import__(\"nvcrbm\")\n cur_filters = libnvcrbm.init(filter_num, filter_size, \n input_batch_num, input_size, channel_num,\n pooling_rate, left_upper_padding, right_lower_padding,\n init_filters, init_hbias, init_vbias)\n\n imgs = cPickle.load(open(\"../data/kyoto_large_train.pkl\", \"r\"))\n img_size = imgs[0].shape[0]\n\n for trial_idx in xrange(trial_num):\n for img_idx in xrange(image_num):\n for batch_idx in xrange(batch_num):\n row_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n col_idx = np.arange(0, input_size) + np.random.random_integers(img_size - 2 * filter_size - input_size) + filter_size - 1\n #row_idx = np.arange(0, input_size) + 200\n #col_idx = np.arange(0, input_size) + 200\n\n batch_data = imgs[img_idx][row_idx][:,col_idx]\n batch_data = batch_data - batch_data.mean()\n batch_data = np.asarray(batch_data.reshape(1, input_size * input_size), dtype=\"float32\")\n \n libnvcrbm.run_batch(trial_idx, img_idx, batch_idx, batch_data)\n\n libnvcrbm.print_result()\n cur_filters = libnvcrbm.get_gpu_filters()\n dump_filter_image(cur_filters, \"../data/kyoto/filters/trial_%d.png\" % trial_idx)\n\n first_layer = {}\n first_layer[\"filters\"] = cur_filters\n first_layer[\"bias\"] = libnvcrbm.get_gpu_hbias()\n cPickle.dump(first_layer, open(\"../data/first_layer.dat\", \"w+\"))",
"def __init__(self, ):\n super().__init__()\n channels = 3\n\n # Initial convolution block\n out_features = 64\n # encoder\n self.input = nn.Sequential(\n nn.ReflectionPad2d(channels),\n nn.Conv2d(3, out_features, (7, 7)),\n nn.InstanceNorm2d(out_features),\n nn.MaxPool2d(2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(out_features, out_features * 2, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n nn.MaxPool2d(2),\n nn.Conv2d(out_features * 2, out_features * 4, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n )\n\n self.in0 = nn.InstanceNorm2d(256)\n self.block0 = blocks()\n self.block1 = blocks()\n self.block2 = blocks()\n self.block3 = blocks()\n self.block4 = blocks()\n self.block5 = blocks()\n self.block6 = blocks()\n self.block7 = blocks()\n\n self.out = nn.Sequential(\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 4, out_features * 2, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 2, out_features, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.ReflectionPad2d(channels),\n nn.Conv2d(out_features, channels, 7),\n nn.Tanh(),\n )",
"def TBLCCCNN_Model(pan_image_height_size, pan_image_width_size, ms_to_pan_ratio, n_bands, n1_pan, n2_pan, n3_pan, \r\n n1_ms, n2_ms, n3_ms, dropout_rate, n_classes, l_r):\r\n \r\n if (pan_image_height_size % ms_to_pan_ratio) != 0 or (pan_image_width_size % ms_to_pan_ratio) != 0:\r\n raise ValueError('Please make sure that both pan_image_height_size and pan_image_width_size can be divided by {}'.format(int(ms_to_pan_ratio)))\r\n \r\n pan_img_input = Input(shape = (pan_image_height_size, pan_image_width_size, 1))\r\n conv_1_pan = Conv2D(n1_pan, (7, 7), padding = 'same', activation = 'relu')(pan_img_input)\r\n max_pool_1_pan = MaxPooling2D(pool_size = (2, 2))(conv_1_pan)\r\n conv_2_pan = Conv2D(n2_pan, (3, 3), padding = 'same', activation = 'relu')(max_pool_1_pan)\r\n max_pool_2_pan = MaxPooling2D(pool_size = (2, 2))(conv_2_pan)\r\n conv_3_pan = Conv2D(n3_pan, (3, 3), padding = 'same', activation = 'relu')(max_pool_2_pan)\r\n glob_max_pool_pan = GlobalMaxPooling2D()(conv_3_pan)\r\n glob_max_pool_pan = Dropout(dropout_rate)(glob_max_pool_pan)\r\n \r\n ms_img_input = Input(shape = (int(pan_image_height_size / ms_to_pan_ratio), int(pan_image_width_size / ms_to_pan_ratio), \r\n n_bands))\r\n conv_1_ms = Conv2D(n1_ms, (3, 3), padding = 'same', activation = 'relu')(ms_img_input)\r\n conv_2_ms = Conv2D(n2_ms, (3, 3), padding = 'same', activation = 'relu')(conv_1_ms)\r\n conv_3_ms = Conv2D(n3_ms, (3, 3), padding = 'same', activation = 'relu')(conv_2_ms)\r\n glob_max_pool_ms = GlobalMaxPooling2D()(conv_3_ms)\r\n glob_max_pool_ms = Dropout(dropout_rate)(glob_max_pool_ms)\r\n \r\n all_features = concatenate([glob_max_pool_pan, glob_max_pool_ms])\r\n \r\n pred_layer = Dense(n_classes, activation = 'softmax')(all_features)\r\n \r\n tblcccnn_model = Model(inputs = [ms_img_input, pan_img_input], outputs = pred_layer)\r\n tblcccnn_model.compile(loss = 'categorical_crossentropy', optimizer = Adam(lr = l_r), \r\n metrics = ['categorical_crossentropy'])\r\n \r\n return tblcccnn_model",
"def fun_n_to_one_big(self, block_index, block_dim, nc1_size):\n ub_output_tmp = self.tik_instance.Tensor(\n \"float32\", (4, self.c_block_size), name=\"ub_output_tmp\",\n scope=tik.scope_ubuf)\n ub_output = self.tik_instance.Tensor(\n \"float32\", (1, self.c_block_size), name=\"ub_output\",\n scope=tik.scope_ubuf)\n ub_input = self.tik_instance.Tensor(\n \"float32\", (240*4, self.c_block_size), name=\"ub_input\",\n scope=tik.scope_ubuf)\n input_num = _ceil_div(self.in_size_h*self.in_size_w*16, 240*64)\n if input_num > 1:\n thread_num = 2\n else:\n thread_num = 1\n\n nc1 = self.batch_size*self.c1_size\n with self.tik_instance.if_scope(block_index != block_dim - 1):\n with self.tik_instance.for_range(0, nc1_size) as nc1_index:\n self.tik_instance.vector_dup(MASK, ub_output_tmp, 0.0, 1, 1, 8)\n self.tik_instance.vector_dup(16, ub_output, 0.0, 1, 1, 8)\n with self.tik_instance.for_range(\n 0, input_num, thread_num=thread_num) as input_index:\n with self.tik_instance.if_scope(\n input_index != input_num - 1):\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, 8*240, 0, 0)\n self.tik_instance.vadd(MASK, ub_output_tmp[0],\n ub_input[0], ub_output_tmp[0],\n 240, 1, 1, 1, 0, 8, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, (self.in_size_h*self.in_size_w -\n input_index*4*240)*2, 0, 0)\n with self.tik_instance.for_range(\n 0, self.in_size_h*self.in_size_w -\n input_index*4*240) as tmp_index:\n self.tik_instance.vadd(16, ub_output[0],\n ub_input[tmp_index*16],\n ub_output[0], 1, 1,\n 1, 1, 0, 2, 0)\n self.tik_instance.vadd(16, ub_output[0],\n ub_output_tmp[0],\n ub_output[0],\n 4, 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size + nc1_index)*16],\n ub_output[0], 0, 1, 2, 0, 0)\n with self.tik_instance.else_scope():\n with self.tik_instance.for_range(0, nc1 - (block_dim - 1)*nc1_size)\\\n as nc1_index:\n self.tik_instance.vector_dup(MASK, ub_output_tmp, 0.0, 1, 1, 8)\n self.tik_instance.vector_dup(16, ub_output, 0.0, 1, 1, 8)\n with self.tik_instance.for_range(\n 0, input_num, thread_num=thread_num) as input_index:\n with self.tik_instance.if_scope(\n input_index != input_num - 1):\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, 8*240, 0, 0)\n self.tik_instance.vadd(MASK, ub_output_tmp[0],\n ub_input[0], ub_output_tmp[0],\n 240, 1, 1, 1, 0, 8, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16 +\n input_index*64*240],\n 0, 1, (self.in_size_h*self.in_size_w -\n input_index*4*240)*2, 0, 0)\n with self.tik_instance.for_range(\n 0, self.in_size_h*self.in_size_w -\n input_index*4*240) as tmp_index:\n self.tik_instance.vadd(16, ub_output[0],\n ub_input[tmp_index*16],\n ub_output[0], 1, 1,\n 1, 1, 0, 8, 0)\n self.tik_instance.vadd(16, ub_output[0],\n ub_output_tmp[0],\n ub_output[0],\n 4, 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size + nc1_index)*16],\n ub_output[0], 0, 1, 2, 0, 0)",
"def cspdarknet53_tiny(input_data):\n input_data = common.convolutional(input_data, (3, 3, 3, 32), downsample=True)\n input_data = common.convolutional(input_data, (3, 3, 32, 64), downsample=True)\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 32, 32))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 32, 32))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 32, 64))\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 64, 128))\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 64, 128))\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 128, 256))\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 128, 128))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 128, 128))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 128, 256))\n route_1 = input_data\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 512, 512))\n\n return route_1, input_data",
"def __init__(self, num_in, num_out, g=1, stride=1, d=(1,1),norm=None):\r\n super(MFunit, self).__init__()\r\n num_mid = num_in if num_in <= num_out else num_out\r\n self.conv1x1x1_in1 = Conv3d_Block(num_in,num_in//4,kernel_size=1,stride=1,norm=norm)\r\n self.conv1x1x1_in2 = Conv3d_Block(num_in//4,num_mid,kernel_size=1,stride=1,norm=norm)\r\n self.conv3x3x3_m1 = DilatedConv3DBlock(num_mid,num_out,kernel_size=(3,3,3),stride=stride,g=g,d=(d[0],d[0],d[0]),norm=norm) # dilated\r\n self.conv3x3x3_m2 = DilatedConv3DBlock(num_out,num_out,kernel_size=(3,3,1),stride=1,g=g,d=(d[1],d[1],1),norm=norm)\r\n # self.conv3x3x3_m2 = DilatedConv3DBlock(num_out,num_out,kernel_size=(1,3,3),stride=1,g=g,d=(1,d[1],d[1]),norm=norm)\r\n\r\n # skip connection\r\n if num_in != num_out or stride != 1:\r\n if stride == 1:\r\n self.conv1x1x1_shortcut = Conv3d_Block(num_in, num_out, kernel_size=1, stride=1, padding=0,norm=norm)\r\n if stride == 2:\r\n # if MF block with stride=2, 2x2x2\r\n self.conv2x2x2_shortcut = Conv3d_Block(num_in, num_out, kernel_size=2, stride=2,padding=0, norm=norm) # params\r",
"def __init__(self, block, layers, groups, reduction, dropout_p=0.2, in_channels=3,\n inplanes=128, input_3x3=True, downsample_kernel_size=3,\n downsample_padding=1, num_classes=1000):\n super(SENet, self).__init__()\n \n self.in_channels = in_channels\n self.inplanes = inplanes\n\n if input_3x3:\n layer0_modules = [\n ('conv1', nn.Conv2d(in_channels, 64, 3, stride=2, padding=1,\n bias=False)),\n ('bn1', nn.BatchNorm2d(64)),\n ('relu1', nn.ReLU(inplace=True)),\n ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1,\n bias=False)),\n ('bn2', nn.BatchNorm2d(64)),\n ('relu2', nn.ReLU(inplace=True)),\n ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1,\n bias=False)),\n ('bn3', nn.BatchNorm2d(inplanes)),\n ('relu3', nn.ReLU(inplace=True)),\n ]\n else:\n layer0_modules = [\n ('conv1', nn.Conv2d(in_channels, inplanes, kernel_size=7, stride=2,\n padding=3, bias=False)),\n ('bn1', nn.BatchNorm2d(inplanes)),\n ('relu1', nn.ReLU(inplace=True)),\n ]\n\n layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2, ceil_mode=True)))\n \n self.layer0 = nn.Sequential(OrderedDict(layer0_modules))\n \n self.layer1 = self._make_layer(\n block,\n planes=64,\n blocks=layers[0],\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=1,\n downsample_padding=0\n )\n \n self.layer2 = self._make_layer(\n block,\n planes=128,\n blocks=layers[1],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.layer3 = self._make_layer(\n block,\n planes=256,\n blocks=layers[2],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.layer4 = self._make_layer(\n block,\n planes=512,\n blocks=layers[3],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.avg_pool = nn.AvgPool2d(7, stride=1)\n self.dropout = nn.Dropout2d(dropout_p) if dropout_p is not None else None\n self.last_linear = nn.Linear(512 * block.expansion, num_classes)",
"def discriminator_block(in_filters, out_filters, bn=True):\n block = [ nn.Conv2d(in_filters, out_filters, 3, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Dropout2d(0.25)]\n if bn:\n block.append(nn.BatchNorm2d(out_filters, 0.8))\n return block",
"def ternausnetv1(input_shape=(512, 512, 3), base_depth=64):\n inputs = Input(input_shape)\n conv1 = Conv2D(base_depth, 3, activation='relu', padding='same')(inputs)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n\n conv2_1 = Conv2D(base_depth*2, 3, activation='relu',\n padding='same')(pool1)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2_1)\n\n conv3_1 = Conv2D(base_depth*4, 3, activation='relu',\n padding='same')(pool2)\n conv3_2 = Conv2D(base_depth*4, 3, activation='relu',\n padding='same')(conv3_1)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3_2)\n\n conv4_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(pool3)\n conv4_2 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(conv4_1)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4_2)\n\n conv5_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(pool4)\n conv5_2 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(conv5_1)\n pool5 = MaxPooling2D(pool_size=(2, 2))(conv5_2)\n\n conv6_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(pool5)\n\n up7 = Conv2DTranspose(base_depth*4, 2, strides=(2, 2), activation='relu',\n padding='same')(conv6_1)\n concat7 = concatenate([up7, conv5_2])\n conv7_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(concat7)\n\n up8 = Conv2DTranspose(base_depth*4, 2, strides=(2, 2), activation='relu',\n padding='same')(conv7_1)\n concat8 = concatenate([up8, conv4_2])\n conv8_1 = Conv2D(base_depth*8, 3, activation='relu',\n padding='same')(concat8)\n\n up9 = Conv2DTranspose(base_depth*2, 2, strides=(2, 2), activation='relu',\n padding='same')(conv8_1)\n concat9 = concatenate([up9, conv3_2])\n conv9_1 = Conv2D(base_depth*4, 3, activation='relu',\n padding='same')(concat9)\n\n up10 = Conv2DTranspose(base_depth, 2, strides=(2, 2), activation='relu',\n padding='same')(conv9_1)\n concat10 = concatenate([up10, conv2_1])\n conv10_1 = Conv2D(base_depth*2, 3, activation='relu',\n padding='same')(concat10)\n\n up11 = Conv2DTranspose(int(base_depth/2), 2, strides=(2, 2),\n activation='relu', padding='same')(conv10_1)\n concat11 = concatenate([up11, conv1])\n\n out = Conv2D(1, 1, activation='sigmoid', padding='same')(concat11)\n\n return Model(input=inputs, output=out)",
"def build_resnet152(self):\n use_batch_norm = self.use_batch_norm\n\n imgs = tf.placeholder(tf.float32, [self.batch_size]+self.img_shape)\n is_train = tf.placeholder(tf.bool)\n\n conv1_feats = convolution(imgs, 7, 7, 64, 2, 2, 'conv1')\n conv1_feats = batch_norm(conv1_feats, 'bn_conv1', is_train, use_batch_norm)\n conv1_feats = nonlinear(conv1_feats, 'relu')\n pool1_feats = max_pool(conv1_feats, 3, 3, 2, 2, 'pool1')\n\n res2a_feats = self.basic_block(pool1_feats, 'res2a', 'bn2a', is_train, use_batch_norm, 64, 1)\n res2b_feats = self.basic_block2(res2a_feats, 'res2b', 'bn2b', is_train, use_batch_norm, 64)\n res2c_feats = self.basic_block2(res2b_feats, 'res2c', 'bn2c', is_train, use_batch_norm, 64)\n \n res3a_feats = self.basic_block(res2c_feats, 'res3a', 'bn3a', is_train, use_batch_norm, 128) \n temp = res3a_feats\n for i in range(1, 8):\n temp = self.basic_block2(temp, 'res3b'+str(i), 'bn3b'+str(i), is_train, use_batch_norm, 128)\n res3b7_feats = temp\n \n res4a_feats = self.basic_block(res3b7_feats, 'res4a', 'bn4a', is_train, use_batch_norm, 256)\n temp = res4a_feats\n for i in range(1, 36):\n temp = self.basic_block2(temp, 'res4b'+str(i), 'bn4b'+str(i), is_train, use_batch_norm, 256)\n res4b35_feats = temp\n\n res5a_feats = self.basic_block(res4b35_feats, 'res5a', 'bn5a', is_train, use_batch_norm, 512)\n res5b_feats = self.basic_block2(res5a_feats, 'res5b', 'bn5b', is_train, use_batch_norm, 512)\n res5c_feats = self.basic_block2(res5b_feats, 'res5c', 'bn5c', is_train, use_batch_norm, 512)\n\n res5c_feats_flat = tf.reshape(res5c_feats, [self.batch_size, 49, 2048])\n self.conv_feats = res5c_feats_flat\n self.conv_feat_shape = [49, 2048]\n self.num_ctx = 49 \n self.dim_ctx = 2048\n\n self.imgs = imgs\n self.is_train = is_train",
"def block_inception_c(blk, net):\n # By default use stride=1 and SAME padding\n s = net.add(Split('%s/Split' % blk, 4))\n br0 = conv2d(net, '%s/Branch_0/Conv2d_0a_1x1' % blk, 256, 1, src=s)\n\n br1 = conv2d(net, '%s/Branch_1/Conv2d_0a_1x1' % blk, 384, 1, src=s)\n br1 = net.add(Split('%s/Branch_1/Split' % blk, 2))\n br10 = conv2d(net, '%s/Branch_1/Conv2d_0b_1x3' % blk, 256, (1, 3), src=br1)\n br11 = conv2d(net, '%s/Branch_1/Conv2d_0c_3x1' % blk, 256, (3, 1), src=br1)\n br1 = net.add(Concat('%s/Branch_1/Concat' % blk, 1), [br10, br11])\n\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0a_1x1' % blk, 384, 1, src=s)\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0b_3x1' % blk, 448, (3, 1))\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0c_1x3' % blk, 512, (1, 3))\n br2 = net.add(Split('%s/Branch_2/Split' % blk, 2))\n br20 = conv2d(net, '%s/Branch_2/Conv2d_0d_1x3' % blk, 256, (1, 3), src=br2)\n br21 = conv2d(net, '%s/Branch_2/Conv2d_0e_3x1' % blk, 256, (3, 1), src=br2)\n br2 = net.add(Concat('%s/Branch_2/Concat' % blk, 1), [br20, br21])\n\n br3 = net.add(AvgPooling2D('%s/Branch_3/AvgPool_0a_3x3' % blk, 3, 1), s)\n br3 = conv2d(net, '%s/Branch_3/Conv2d_0b_1x1' % blk, 256, 1)\n return net.add(Concat('%s/Concat' % blk, 1), [br0, br1, br2, br3])",
"def TCN_V5(\n n_classes, \n feat_dim,\n max_len,\n gap=1,\n dropout=0.0,\n activation=\"relu\"):\n\n ROW_AXIS = 1\n CHANNEL_AXIS = 2\n \n initial_conv_len = 8\n initial_conv_num = 32\n\n config = [ \n [(1,8,32)],\n [(1,8,32)],\n [(1,8,32)],\n [(2,8,64)],\n [(1,8,64)],\n [(1,8,64)],\n ]\n\n input = Input(shape=(max_len,feat_dim))\n model = input\n\n model = Convolution1D(initial_conv_num, \n initial_conv_len,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=1)(model)\n\n for depth in range(0,len(config)):\n blocks = []\n for stride,filter_dim,num in config[depth]:\n ## residual block\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n relu = Activation(activation)(bn)\n dr = Dropout(dropout)(relu)\n conv = Convolution1D(num, \n filter_dim,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=stride)(dr)\n #dr = Dropout(dropout)(conv)\n\n\n ## potential downsample\n conv_shape = K.int_shape(conv)\n model_shape = K.int_shape(model)\n if conv_shape[CHANNEL_AXIS] != model_shape[CHANNEL_AXIS]:\n model = Convolution1D(num, \n 1,\n init=\"he_normal\",\n border_mode=\"same\",\n subsample_length=2)(model)\n\n ## merge block\n model = merge([model,conv],mode='sum',concat_axis=CHANNEL_AXIS)\n\n ## final bn+relu\n bn = BatchNormalization(mode=0, axis=CHANNEL_AXIS)(model)\n model = Activation(activation)(bn)\n\n\n if gap:\n pool_window_shape = K.int_shape(model)\n gap = AveragePooling1D(pool_window_shape[ROW_AXIS],\n stride=1)(model)\n flatten = Flatten()(gap)\n else:\n flatten = Flatten()(model)\n\n dense = Dense(output_dim=n_classes,\n init=\"he_normal\",\n activation=\"softmax\")(flatten)\n\n model = Model(input=input, output=dense)\n # optimizer = SGD(lr=0.01, momentum=0.9, decay=0.0, nesterov=True) \n # model.compile(loss='categorical_crossentropy', optimizer=optimizer,metrics=['accuracy'])\n return model",
"def mgcNetArchSkipMini(outLayer, l2_val, **kwargs):\n\n def_vals = {\"input_img_rows\" : 72,\n \"input_img_cols\" : 72,\n \"channels\" : 1,\n \"nb_classes\" : 13\n } # default parameters value\n\n for k, v in def_vals.items():\n kwargs.setdefault(k, v)\n\n input_img_rows = kwargs['input_img_rows']\n input_img_cols = kwargs['input_img_cols']\n channels = kwargs['channels']\n nb_classes = kwargs['nb_classes']\n\n \n # Input: 72 x 72 x 1\n img_shape = layers.Input(shape = (input_img_rows, input_img_cols, channels))\n\n # Layer 1\n #------------------------\n conv1 = layers.Conv2D(filters=8, kernel_size=(2, 2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(img_shape)\n conv1 = layers.Activation('relu')(conv1)\n conv1 = layers.MaxPooling2D(pool_size=(2, 2))(conv1)\n conv1 = layers.Dropout(0.4)(conv1)\n\n # Layer 2\n #------------------------\n conv2 = layers.Conv2D(filters=16, kernel_size=(2,2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(conv1)\n conv2 = layers.Activation('relu')(conv2) \n conv2 = layers.Conv2D(filters=64, kernel_size=(2,2), padding='same', activation='relu', strides = 2)(conv2)\n conv2 = layers.Dropout(0.4)(conv2)\n\n # Layer 3\n #------------------------\n conv3 = layers.Conv2D(filters=16, kernel_size=(2,2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(conv2)\n conv3 = layers.Activation('relu')(conv3) \n conv3 = layers.Conv2D(filters=16, kernel_size=(2,2), padding='same', activation='relu', strides = 2)(conv3)\n conv3 = layers.Dropout(0.4)(conv3)\n \n # skip connect 1\n #shortcut_layer = layers.Conv2D(filters=64, kernel_size=(1,1), padding='same', activation='relu', strides = 4)(conv1)\n shortcut_layer = layers.Conv2D(filters=16, kernel_size=(1,1), padding='same', activation='relu', strides = 8)(img_shape)\n \n conv3 = layers.add([shortcut_layer, conv3])\n #conv3 = layers.Concatenate()([shortcut_layer,conv3]) \n\n # Layer 4\n #------------------------\n conv4 = layers.Conv2D(filters=32, kernel_size=(2,2), padding='same', dilation_rate = (2, 2), kernel_regularizer=regularizers.l2(l2_val))(conv3)\n conv4 = layers.Activation('relu')(conv4)\n conv4 = layers.Conv2D(filters=16, kernel_size=(2,2), padding='same', activation='relu', strides = 2)(conv4)\n conv4 = layers.Dropout(0.4)(conv4)\n\n # Layer 5\n #------------------------\n output = layers.Conv2D(filters=32, kernel_size=(2,2), padding='same', kernel_regularizer=regularizers.l2(l2_val))(conv3) # skip layer 4\n output = layers.Activation('relu')(output)\n output = layers.Conv2D(filters=32, kernel_size=(2,2), padding='same', activation='relu', strides = 2)(output) \n output = layers.Dropout(0.4)(output)\n \n # skip connect 2\n shortcut_layer2 = layers.Conv2D(filters=32, kernel_size=(1,1), padding='same', activation='relu', strides = 2)(conv3)\n output = layers.add([shortcut_layer2, output])\n \n # FC Layer\n #------------------------\n outputmlp = layers.Flatten()(output)\n outputmlp = layers.Dense(32, activation = 'relu')(outputmlp)\n outputmlp = layers.Dropout(0.5)(outputmlp)\n\n predictionsMlp = layers.Dense(nb_classes, activation='softmax')(outputmlp)\n \n \n # global averaging\n weight_decay=1E-4\n concat_axis = 1\n \n x = BatchNormalization(axis=concat_axis,\n gamma_regularizer=regularizers.l2(weight_decay),\n beta_regularizer=regularizers.l2(weight_decay))(output)\n x = Activation('relu')(x)\n x = layers.Dropout(0.4)(x)\n x = GlobalAveragePooling2D(data_format=K.image_data_format())(x)\n \n predictionsGloAvg = layers.Dense(nb_classes,\n activation='softmax',\n 
kernel_regularizer=regularizers.l2(weight_decay),\n bias_regularizer=regularizers.l2(weight_decay))(x)\n \n if outLayer == \"gloAvg\":\n predictions = predictionsGloAvg\n elif outLayer == \"mlp\":\n predictions = predictionsMlp\n \n # prediction model\n model = Model(img_shape, predictions, name = 'skipconnect')\n\n return model"
] | [
"0.6570107",
"0.6452104",
"0.6403361",
"0.6281001",
"0.6181994",
"0.61439276",
"0.6123263",
"0.6106863",
"0.6076191",
"0.6075117",
"0.60421383",
"0.59789044",
"0.5971289",
"0.5951675",
"0.59458995",
"0.5943738",
"0.5920389",
"0.5881937",
"0.5872571",
"0.58522433",
"0.5844439",
"0.58188236",
"0.57995915",
"0.57918257",
"0.5790851",
"0.5787548",
"0.5780075",
"0.5764218",
"0.5763967",
"0.5744042"
] | 0.6571941 | 0 |
mnasneta1 with 5x5MBconv6SE block only | def mnasneta1_5x5mbconv6se(pretrained=False, progress=False, **kwargs):
return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],
kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[True, True, True, True, True, True],
dropout=0, pretrained=pretrained, progress=progress, **kwargs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Unet4(shape, nb_filters=32, exp=1, kernel_size=3, initialization=\"glorot_uniform\", activation=\"relu\", sigma_noise=0, output_channels=1, drop=0.0, regularization=None):\n \n \n input_layer = Input(shape=shape)\n\n conv1 = ConvBlock(input_layer, nb_filters=nb_filters, kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool1 = MaxPooling2D(pool_size=(2, 2))(conv1)\n if drop > 0.0: pool1 = Dropout(drop)(pool1)\n\n conv2 = ConvBlock(pool1, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool2 = MaxPooling2D(pool_size=(2, 2))(conv2)\n if drop > 0.0: pool2 = Dropout(drop)(pool2)\n\n conv3 = ConvBlock(pool2, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool3 = MaxPooling2D(pool_size=(2, 2))(conv3)\n if drop > 0.0: pool3 = Dropout(drop)(pool3)\n\n conv4 = ConvBlock(pool3, nb_filters=nb_filters * 2 **(3 * exp), kernel_size=kernel_size, initializer=initialization, activation=activation, regularization=regularization)\n pool4 = MaxPooling2D(pool_size=(2, 2))(conv4)\n if drop > 0.0: pool4 = Dropout(drop)(pool4)\n\n deconv5 = DeconvBlock(conv4, residual=conv3, nb_filters=nb_filters * 2 **(2 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv5 = Dropout(drop)(deconv5)\n\n deconv6 = DeconvBlock(deconv5, residual=conv2, nb_filters=nb_filters * 2 **(1 * exp), kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv6 = Dropout(drop)(deconv6)\n\n deconv7 = DeconvBlock(deconv6, residual=conv1, nb_filters=nb_filters, kernel_size=kernel_size, regularization=regularization)\n if drop > 0.0: deconv7 = Dropout(drop)(deconv7)\n\n if sigma_noise > 0:\n deconv7 = GaussianNoise(sigma_noise)(deconv7)\n\n output_layer = Conv2D(filters=output_channels, kernel_size=(1, 1))(deconv7)\n output_layer = BatchNormalization()(output_layer)\n output_layer = Activation('softmax')(output_layer)\n\n model = Model(inputs=input_layer, outputs=output_layer, name='Unet')\n return model",
"def mnasneta1_3x3mbconv6se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def mnasneta1_5x5mbconv6(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def mnasneta1_5x5mbconv3se(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[True, True, True, True, True, True],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def inception_block_1a(X):\n\tX_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name ='inception_3a_3x3_conv1')(X)\n\tX_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name = 'inception_3a_3x3_bn1')(X_3x3)\n\tX_3x3 = Activation('relu')(X_3x3)\n\tX_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)\n\tX_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3a_3x3_conv2')(X_3x3)\n\tX_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn2')(X_3x3)\n\tX_3x3 = Activation('relu')(X_3x3)\n\tX_5x5 = Conv2D(16, (1, 1), data_format='channels_first', name='inception_3a_5x5_conv1')(X)\n\tX_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn1')(X_5x5)\n\tX_5x5 = Activation('relu')(X_5x5)\n\tX_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)\n\tX_5x5 = Conv2D(32, (5, 5), data_format='channels_first', name='inception_3a_5x5_conv2')(X_5x5)\n\tX_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn2')(X_5x5)\n\tX_5x5 = Activation('relu')(X_5x5)\n\tX_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)\n\tX_pool = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3a_pool_conv')(X_pool)\n\tX_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_pool_bn')(X_pool)\n\tX_pool = Activation('relu')(X_pool)\n\tX_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool)\n\tX_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3a_1x1_conv')(X)\n\tX_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_1x1_bn')(X_1x1)\n\tX_1x1 = Activation('relu')(X_1x1)\n\t# CONCAT\n\tinception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)\n\treturn inception",
"def inception_block_1a(X):\n\n X_3x3 = Conv2D(96, (1, 1), data_format='channels_first', name='inception_3a_3x3_conv1')(X)\n X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn1')(X_3x3)\n X_3x3 = Activation('relu')(X_3x3)\n X_3x3 = ZeroPadding2D(padding=(1, 1), data_format='channels_first')(X_3x3)\n X_3x3 = Conv2D(128, (3, 3), data_format='channels_first', name='inception_3a_3x3_conv2')(X_3x3)\n X_3x3 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_3x3_bn2')(X_3x3)\n X_3x3 = Activation('relu')(X_3x3)\n\n X_5x5 = Conv2D(16, (1, 1), data_format='channels_first', name='inception_3a_5x5_conv1')(X)\n X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn1')(X_5x5)\n X_5x5 = Activation('relu')(X_5x5)\n X_5x5 = ZeroPadding2D(padding=(2, 2), data_format='channels_first')(X_5x5)\n X_5x5 = Conv2D(32, (5, 5), data_format='channels_first', name='inception_3a_5x5_conv2')(X_5x5)\n X_5x5 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_5x5_bn2')(X_5x5)\n X_5x5 = Activation('relu')(X_5x5)\n\n X_pool = MaxPooling2D(pool_size=3, strides=2, data_format='channels_first')(X)\n X_pool = Conv2D(32, (1, 1), data_format='channels_first', name='inception_3a_pool_conv')(X_pool)\n X_pool = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_pool_bn')(X_pool)\n X_pool = Activation('relu')(X_pool)\n X_pool = ZeroPadding2D(padding=((3, 4), (3, 4)), data_format='channels_first')(X_pool)\n\n X_1x1 = Conv2D(64, (1, 1), data_format='channels_first', name='inception_3a_1x1_conv')(X)\n X_1x1 = BatchNormalization(axis=1, epsilon=0.00001, name='inception_3a_1x1_bn')(X_1x1)\n X_1x1 = Activation('relu')(X_1x1)\n\n # CONCAT\n inception = concatenate([X_3x3, X_5x5, X_pool, X_1x1], axis=1)\n\n return inception",
"def discriminator_block_conditionnal(self, name):\n \n if self.fit_mask : \n \n im = Input(shape=(2+self.nb_classe_mask, self.image_row, self.image_column, self.image_depth), name='dis_input')\n \n else :\n # In:\n im = Input(shape=(2, self.image_row, self.image_column, self.image_depth), name='dis_input')\n\n\n res = Input(shape=(1, self.image_row, self.image_column, self.image_depth), name='dis_input_res')\n\n inputs = Concatenate(axis=-4)([im, res])\n\n # Input 64\n disnet = Conv3D(self.discriminator_kernel * 1, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_1')(inputs)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 1 : 32\n disnet = Conv3D(self.discriminator_kernel * 2, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_2')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 2 : 16\n disnet = Conv3D(self.discriminator_kernel * 4, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_3')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 3 : 8\n disnet = Conv3D(self.discriminator_kernel * 8, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_4')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 4 : 4\n disnet = Conv3D(self.discriminator_kernel * 16, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_5')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n\n \n decision = Conv3D(1, 2, strides=1,\n use_bias=False,\n kernel_initializer='he_normal',\n data_format='channels_first',\n name='dis_decision')(disnet)\n\n decision = Reshape((1,))(decision)\n\n model = Model(inputs=[im, res], outputs=[decision], name=name)\n\n return model",
"def test_se_block(self):\n images = tf.zeros((10, 128, 128, 3), dtype=tf.float32)\n global_params = efficientnet_model.GlobalParams(\n 1.0,\n 1.0,\n 0,\n 'channels_last',\n num_classes=10,\n batch_norm=utils.TpuBatchNormalization)\n blocks_args = [\n efficientnet_model.BlockArgs(\n kernel_size=3,\n num_repeat=3,\n input_filters=3,\n output_filters=6,\n expand_ratio=6,\n id_skip=False,\n strides=[2, 2],\n se_ratio=0.8,\n conv_type=0,\n fused_conv=0,\n super_pixel=0)\n ]\n model = efficientnet_model.Model(blocks_args, global_params)\n outputs = model(images, training=True)\n self.assertEqual((10, 10), outputs[0].shape)",
"def learn_test(data, radio=0, nmk=0, maxif=0, minif=0, num_board=1, verbose=0):\r\n train = data\r\n nmNk = nmk*1024\r\n print('nmk, nmNk', nmk, nmNk)\r\n if verbose >= 1:\r\n print (\"\\n\\n\\n\\n\\ntest_minimal_01\")\r\n\r\n network = cm1k.CM1KEmulator(network_size=num_board * 1024)\r\n assert(not network.euclidean_norm)\r\n\r\n network.write_maxif(maxif)\r\n network.write_minif(minif)\r\n read_neuron_count=[0]\r\n # Train network(RBF Learning)\r\n if radio==1:\r\n\r\n l=len(train)\r\n iteration=0\r\n ID=0\r\n for i in range (0,2):\r\n if ID != l:#&(read_neuron_count[iteration] <= nmNk) :\r\n for input in train:\r\n input_comps = [int(x) for x in input]\r\n context = input_comps[0]\r\n cat = input_comps[1]\r\n pattern = input_comps[2:]\r\n network.learn(pattern, cat, context)\r\n read_neuron_count.append(network.read_ncount())\r\n ID, UNC_c, UNC_i, UNK,total_detail, cla_result, null = classify(train, network, radio=5, spinvalue=3, num_board=1, verbose=1)\r\n iteration+=1\r\n # print(network.read_ncount())\r\n\r\n else :\r\n break\r\n #assert(network.read_ncount() == 3)\r\n # print('iteration', iteration)\r\n # print('maxif', maxif)\r\n # print('minif', minif)\r\n #print('network.register_legend[NSR] : ',network.register_legend['NSR'])\r\n # Write all sampels\r\n elif radio == 9:\r\n iteration = 0\r\n for input in train:\r\n # if verbose >= 1:\r\n # print \"================================================================================\"\r\n input_comps = [int(x) for x in input]\r\n context = input_comps[0]\r\n cat = input_comps[1]\r\n pattern = input_comps[2:]\r\n network.learn_write_all(pattern, cat, context)\r\n # print(network.read_ncount())\r\n #elif radio==2: #Deep RBF\r\n return network, iteration, read_neuron_count",
"def build_bisenet(inputs, num_classes):\n\n ### The spatial path\n ### The number of feature maps for each convolution is not specified in the paper\n ### It was chosen here to be equal to the number of feature maps of a classification\n ### model at each corresponding stage\n # spatial_net = fluid.layers.resize_bilinear(inputs, [Image_Height/8, Image_Width/8])\n # print('spatial_net_1',spatial_net)\n\n ## spatial path\n spatial_net = ConvBlock(inputs, num_filters=64, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=128, kernel_size=3, stride=2)\n spatial_net = ConvBlock(spatial_net, num_filters=256, kernel_size=3, stride=2)\n # print(\"spatial_net:\", spatial_net)\n\n # spatial_net = fluid.layers.resize_bilinear(spatial_net, [Image_Height/8, Image_Width/8])\n # print('spatial_net_2',spatial_net)\n ### Context path\n model = ResNet(is_test=False)\n # spatial_net = model.bottleneck_block1(inputs)\n end_points_16, end_points_32 = model.net(inputs)\n net_4 = AttentionRefinementModule(end_points_16, num_filters=512)\n net_5 = AttentionRefinementModule(end_points_32, num_filters=1024)\n global_channels = fluid.layers.reduce_mean(net_5, [2, 3], keep_dim=True)\n net_5_scaled = fluid.layers.elementwise_mul(net_5, global_channels, axis=0)\n\n ### Combining the paths\n net_4 = Upsample(net_4, scale=2)\n net_5_scaled = Upsample(net_5_scaled, scale=4)\n # print('net_4, net_5:', [net_4, net_5_scaled])\n # layers_concat = list()\n # layers_concat.append(spatial_net)\n ## layers_concat.append(net_4)\n # layers_concat.append(net_5_scaled)\n context_net = fluid.layers.concat([spatial_net, net_4, net_5_scaled], axis=1) #\n # context_net = fluid.layers.concat(input=layers_concat,axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([net_4, net_5_scaled], axis=1)\n # print('context_net', context_net)\n # context_net = fluid.layers.concat([spatial_net,context_net], axis=1)\n # print('context_net2',context_net)\n\n ### FFM\n # net = FeatureFusionModule(input_1=spatial_net, input_2=context_net, num_filters=num_classes)\n net = FeatureFusionModule(inputs=context_net, num_filters=num_classes)\n\n # print('net', net)\n\n ## [batch_zize, num_filters, 128, 64]\n\n ### Final upscaling and finish\n # net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 128])\n # print('conv2d_transpose', net)\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn1')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[128, 256])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn2')\n net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[256, 512])\n net = batch_normalization(net, relu=True, name='conv2d_transpose_bn3')\n #net = fluid.layers.conv2d_transpose(input=net, num_filters=num_classes, output_size=[512, 1024])\n #net = batch_normalization(net, relu=True, name='conv2d_transpose_bn4')\n # print('net',net)\n net = fluid.layers.image_resize(net, out_shape=[512, 1024], resample='BILINEAR')\n\n net = fluid.layers.conv2d(net, num_classes, 1)\n return net",
"def mnasneta1_3x3mbconv6(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[6, 6, 6, 6, 6, 6],\n kernel_sizes=[3, 3, 3, 3, 3, 3], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def mobilenetv2 (inputs, k, alpha = 1.0, train_bn = False):\n\n x = conv_block(inputs, 32, alpha, (3, 3), strides=(2, 2), block_id=0, train_bn=train_bn) # Input Res: 1\n\n x = inverted_residual_block(x, 16, (3, 3), t=1, strides=1, n=1, alpha=1.0, block_id=1, train_bn=train_bn)\t# Input Res: 1/2\n x = inverted_residual_block(x, 24, (3, 3), t=6, strides=2, n=2, alpha=1.0, block_id=2, train_bn=train_bn)\t# Input Res: 1/2\n x = inverted_residual_block(x, 32, (3, 3), t=6, strides=2, n=3, alpha=1.0, block_id=4, train_bn=train_bn)\t# Input Res: 1/4\n x = inverted_residual_block(x, 64, (3, 3), t=6, strides=2, n=4, alpha=1.0, block_id=7, train_bn=train_bn)\t# Input Res: 1/8\n x = inverted_residual_block(x, 96, (3, 3), t=6, strides=1, n=3, alpha=1.0, block_id=11, train_bn=train_bn)\t# Input Res: 1/8\n x = inverted_residual_block(x, 160, (3, 3), t=6, strides=2, n=3, alpha=1.0, block_id=14, train_bn=train_bn)\t# Input Res: 1/16\n x = inverted_residual_block(x, 320, (3, 3), t=6, strides=1, n=1, alpha=1.0, block_id=17, train_bn=train_bn)\t# Input Res: 1/32\n\n x = conv_block(x, 1280, alpha, (1, 1), strides=(1, 1), block_id=18, train_bn=train_bn) # Input Res: 1/32\n\n x = KL.GlobalAveragePooling2D()(x)\n x = KL.Reshape((1, 1, 1280))(x)\n x = KL.Dropout(0.3, name='Dropout')(x)\n x = KL.Conv2D(k, (1, 1), padding='same')(x)\n\n x = KL.Activation('softmax', name='softmax')(x)\n output = KL.Reshape((k,))(x)\n\n model = KM.Model(inputs, output)\n plot_model(model, to_file='MobileNetv2.png', show_shapes=True)\n\n return model",
"def pretrain_bm_net(self, n_epochs=25):\n optimizer = torch.optim.Adam(self.model.bm_net.parameters(), lr = self.lr*5)\n if self.bmloss_type == 'mse':\n criterion = MSELoss()\n elif self.bmloss_type == 'cos':\n criterion = CosineEmbeddingLoss()\n self.model.bm_net.train()\n self.model.bm_net.to(self.device)\n for epoch in range(n_epochs):\n self.model.bm_net.zero_grad()\n optimizer.zero_grad()\n cur_loss = []\n for batch_idx, (uids, feats, _, feats_len) in enumerate(self.model.loader):\n feats = feats.to(self.device).float()\n loss = 0\n out, out_len = self.model.bm_net(feats, feats_len)\n for idx in np.arange(len(out_len)):\n if self.bmloss_type == 'cos':\n # loss += criterion(out[idx, :out_len[idx]-1, :], feats[idx, 1:out_len[idx], :], torch.cuda.LongTensor([1]))\n loss += criterion(out[idx, :out_len[idx]-1, :], feats[idx, 1:out_len[idx], :], torch.LongTensor([1]).to(self.device))\n else:\n loss += criterion(out[idx, :out_len[idx]-1, :], feats[idx, 1:out_len[idx], :])\n # print('--------')\n # print(torch.isnan(out[idx, :out_len[idx]-1, :]).sum(), torch.isnan(feats[idx, :out_len[idx]-1, :]).sum())\n # print(torch.isnan(out).sum(), torch.isnan(feats).sum())\n # print(loss)\n loss.backward()\n cur_loss.append(loss.item())\n nn.utils.clip_grad_norm_(self.model.bm_net.parameters(), 5)\n optimizer.step()\n optimizer.zero_grad()\n self.model.bm_net.zero_grad()\n self.logger.info(f'BM Module pretrain, Epoch {epoch+1}/{n_epochs}: loss {round(np.mean(cur_loss), 8)}')",
"def __init__(self, block, layers, groups, reduction, dropout_p=0.2, in_channels=3,\n inplanes=128, input_3x3=True, downsample_kernel_size=3,\n downsample_padding=1, num_classes=1000):\n super(SENet, self).__init__()\n \n self.in_channels = in_channels\n self.inplanes = inplanes\n\n if input_3x3:\n layer0_modules = [\n ('conv1', nn.Conv2d(in_channels, 64, 3, stride=2, padding=1,\n bias=False)),\n ('bn1', nn.BatchNorm2d(64)),\n ('relu1', nn.ReLU(inplace=True)),\n ('conv2', nn.Conv2d(64, 64, 3, stride=1, padding=1,\n bias=False)),\n ('bn2', nn.BatchNorm2d(64)),\n ('relu2', nn.ReLU(inplace=True)),\n ('conv3', nn.Conv2d(64, inplanes, 3, stride=1, padding=1,\n bias=False)),\n ('bn3', nn.BatchNorm2d(inplanes)),\n ('relu3', nn.ReLU(inplace=True)),\n ]\n else:\n layer0_modules = [\n ('conv1', nn.Conv2d(in_channels, inplanes, kernel_size=7, stride=2,\n padding=3, bias=False)),\n ('bn1', nn.BatchNorm2d(inplanes)),\n ('relu1', nn.ReLU(inplace=True)),\n ]\n\n layer0_modules.append(('pool', nn.MaxPool2d(3, stride=2, ceil_mode=True)))\n \n self.layer0 = nn.Sequential(OrderedDict(layer0_modules))\n \n self.layer1 = self._make_layer(\n block,\n planes=64,\n blocks=layers[0],\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=1,\n downsample_padding=0\n )\n \n self.layer2 = self._make_layer(\n block,\n planes=128,\n blocks=layers[1],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.layer3 = self._make_layer(\n block,\n planes=256,\n blocks=layers[2],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.layer4 = self._make_layer(\n block,\n planes=512,\n blocks=layers[3],\n stride=2,\n groups=groups,\n reduction=reduction,\n downsample_kernel_size=downsample_kernel_size,\n downsample_padding=downsample_padding\n )\n \n self.avg_pool = nn.AvgPool2d(7, stride=1)\n self.dropout = nn.Dropout2d(dropout_p) if dropout_p is not None else None\n self.last_linear = nn.Linear(512 * block.expansion, num_classes)",
"def mnasneta1_5x5mbconv3(pretrained=False, progress=False, **kwargs):\n return _mnasnet('mnasneta1', MBConv, layers=[2, 3, 4, 2, 3, 1], expansions=[3, 3, 3, 3, 3, 3],\n kernel_sizes=[5, 5, 5, 5, 5, 5], SE=[False, False, False, False, False, False],\n dropout=0, pretrained=pretrained, progress=progress, **kwargs)",
"def block_inception_c(blk, net):\n # By default use stride=1 and SAME padding\n s = net.add(Split('%s/Split' % blk, 4))\n br0 = conv2d(net, '%s/Branch_0/Conv2d_0a_1x1' % blk, 256, 1, src=s)\n\n br1 = conv2d(net, '%s/Branch_1/Conv2d_0a_1x1' % blk, 384, 1, src=s)\n br1 = net.add(Split('%s/Branch_1/Split' % blk, 2))\n br10 = conv2d(net, '%s/Branch_1/Conv2d_0b_1x3' % blk, 256, (1, 3), src=br1)\n br11 = conv2d(net, '%s/Branch_1/Conv2d_0c_3x1' % blk, 256, (3, 1), src=br1)\n br1 = net.add(Concat('%s/Branch_1/Concat' % blk, 1), [br10, br11])\n\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0a_1x1' % blk, 384, 1, src=s)\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0b_3x1' % blk, 448, (3, 1))\n br2 = conv2d(net, '%s/Branch_2/Conv2d_0c_1x3' % blk, 512, (1, 3))\n br2 = net.add(Split('%s/Branch_2/Split' % blk, 2))\n br20 = conv2d(net, '%s/Branch_2/Conv2d_0d_1x3' % blk, 256, (1, 3), src=br2)\n br21 = conv2d(net, '%s/Branch_2/Conv2d_0e_3x1' % blk, 256, (3, 1), src=br2)\n br2 = net.add(Concat('%s/Branch_2/Concat' % blk, 1), [br20, br21])\n\n br3 = net.add(AvgPooling2D('%s/Branch_3/AvgPool_0a_3x3' % blk, 3, 1), s)\n br3 = conv2d(net, '%s/Branch_3/Conv2d_0b_1x1' % blk, 256, 1)\n return net.add(Concat('%s/Concat' % blk, 1), [br0, br1, br2, br3])",
"def __init__(self, id, node_type=NodeType.HIDDEN, activation=F.relu, layer_type=nn.Conv2d,\n conv_window_size=3, conv_stride=1, max_pool_size=2):\n\n super(ModuleNEATNode, self).__init__(id, node_type)\n\n batch_norm_chance = 0.65 # chance that a new node will start with batch norm\n use_batch_norm = random.random() < batch_norm_chance\n\n dropout_chance = 0.2 # chance that a new node will start with drop out\n use_dropout = random.random() < dropout_chance\n\n max_pool_chance = 0.3 # chance that a new node will start with drop out\n use_max_pool = random.random() < max_pool_chance\n\n self.activation = Mutagen(F.relu, F.leaky_relu, torch.sigmoid, F.relu6,\n discreet_value=activation, name=\"activation function\",\n mutation_chance=0.15) # TODO try add in Selu, Elu\n\n conv_out_features = 25 + random.randint(0, 25)\n linear_out_features = 100 + random.randint(0, 100)\n\n linear_submutagens = \\\n {\n \"regularisation\": Mutagen(None, nn.BatchNorm1d,\n discreet_value=nn.BatchNorm1d if use_batch_norm else None,\n mutation_chance=0.15),\n\n \"dropout\": Mutagen(None, nn.Dropout, discreet_value=nn.Dropout if use_dropout else None, sub_mutagens=\n {\n nn.Dropout: {\n \"dropout_factor\": Mutagen(value_type=ValueType.CONTINUOUS, current_value=0.15, start_range=0,\n end_range=0.75)}\n }, mutation_chance=0.08),\n\n \"out_features\": Mutagen(value_type=ValueType.WHOLE_NUMBERS, current_value=linear_out_features,\n start_range=10,\n end_range=1024, name=\"num out features\", mutation_chance=0.22,\n distance_weighting=Props.LAYER_SIZE_COEFFICIENT if Config.allow_attribute_distance else 0)\n }\n\n conv_submutagens = {\n \"conv_window_size\": Mutagen(3, 5, 7, discreet_value=conv_window_size, mutation_chance=0.13),\n\n \"conv_stride\": Mutagen(value_type=ValueType.WHOLE_NUMBERS, current_value=conv_stride, start_range=1,\n end_range=5),\n\n \"reduction\": Mutagen(None, nn.MaxPool2d, discreet_value=nn.MaxPool2d if use_max_pool else None,\n sub_mutagens=\n {\n nn.MaxPool2d: {\"pool_size\": Mutagen(\n value_type=ValueType.WHOLE_NUMBERS, current_value=max_pool_size, start_range=2,\n end_range=5)}\n }, mutation_chance=0.15),\n\n \"regularisation\": Mutagen(None, nn.BatchNorm2d, discreet_value=nn.BatchNorm2d if use_batch_norm else None,\n mutation_chance=0.15),\n\n \"dropout\": Mutagen(None, nn.Dropout2d, discreet_value=nn.Dropout2d if use_dropout else None, sub_mutagens=\n {\n nn.Dropout2d: {\n \"dropout_factor\": Mutagen(value_type=ValueType.CONTINUOUS, current_value=0.1,\n start_range=0, end_range=0.75)}\n }, mutation_chance=0.08),\n\n \"out_features\": Mutagen(value_type=ValueType.WHOLE_NUMBERS, current_value=conv_out_features, start_range=1,\n end_range=100, name=\"num out features\", mutation_chance=0.22,\n distance_weighting=Props.LAYER_SIZE_COEFFICIENT if Config.allow_attribute_distance else 0)\n }\n\n if use_linears and not use_convs:\n self.layer_type = Mutagen(nn.Linear, discreet_value=nn.Linear,\n distance_weighting=Props.LAYER_TYPE_COEFFICIENT if Config.allow_attribute_distance else 0,\n sub_mutagens={nn.Linear: linear_submutagens}\n )\n if use_convs and not use_linears:\n self.layer_type = Mutagen(nn.Conv2d, discreet_value=nn.Conv2d,\n distance_weighting=Props.LAYER_TYPE_COEFFICIENT if Config.allow_attribute_distance else 0,\n sub_mutagens={nn.Conv2d: conv_submutagens})\n if use_convs and use_linears:\n self.layer_type = Mutagen(nn.Conv2d, nn.Linear, discreet_value=layer_type,\n distance_weighting=Props.LAYER_TYPE_COEFFICIENT if Config.allow_attribute_distance else 0,\n sub_mutagens={\n nn.Conv2d: 
conv_submutagens,\n nn.Linear: linear_submutagens\n }, name=\"deep layer type\", mutation_chance=0.08)",
"def sn_non_local_block_sim(x, training=True, name='sn_nonlocal'):\n with tf.variable_scope(name):\n _, h, w, num_channels = x.shape.as_list()\n location_num = h * w\n downsampled_num = location_num // 4\n\n # theta path\n theta = sn_conv1x1(x, num_channels // 8, training, 'sn_conv_theta')\n theta = tf.reshape(\n theta, [-1, location_num, num_channels // 8])\n\n # phi path\n phi = sn_conv1x1(x, num_channels // 8, training, 'sn_conv_phi')\n phi = tf.layers.max_pooling2d(inputs=phi, pool_size=[2, 2], strides=2)\n phi = tf.reshape(\n phi, [-1, downsampled_num, num_channels // 8])\n\n attn = tf.matmul(theta, phi, transpose_b=True)\n attn = tf.nn.softmax(attn)\n\n # g path\n g = sn_conv1x1(x, num_channels // 2, training, 'sn_conv_g')\n g = tf.layers.max_pooling2d(inputs=g, pool_size=[2, 2], strides=2)\n g = tf.reshape(\n g, [-1, downsampled_num, num_channels // 2])\n\n attn_g = tf.matmul(attn, g)\n attn_g = tf.reshape(attn_g, [-1, h, w, num_channels // 2])\n sigma = tf.get_variable(\n 'sigma_ratio', [], initializer=tf.initializers.constant(0.0))\n attn_g = sn_conv1x1(attn_g, num_channels, training, 'sn_conv_attn')\n return x + sigma * attn_g",
"def fun_n_to_one_small(self, block_index, block_dim, nc1_size):\n nc1 = self.batch_size*self.c1_size\n in_size_w_num = _ceil_div(self.in_size_w, 4)\n with self.tik_instance.if_scope(block_index != block_dim - 1):\n with self.tik_instance.for_range(0, nc1_size) as nc1_index:\n ub_input = self.tik_instance.Tensor(\n \"float32\", (self.in_size_h, self.in_size_w,\n self.c_block_size),\n name=\"ub_input\", scope=tik.scope_ubuf)\n self.tik_instance.data_move(\n ub_input[0], self.grads_gm[(block_index*nc1_size +\n nc1_index) * self.in_size_h *\n self.in_size_w*16],\n 0, 1, self.in_size_h*self.in_size_w*2, 0, 0)\n if in_size_w_num > 1:\n with self.tik_instance.for_range(0, in_size_w_num) \\\n as w_in_index:\n with self.tik_instance.if_scope(\n w_in_index != in_size_w_num - 1):\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n MASK, ub_input[w_in_index*64],\n ub_input[w_in_index*64 + self.in_size_w*16],\n ub_input[w_in_index*64], self.in_size_h-1,\n 1, 1, 1, 0, self.in_size_w*2, 0)\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64], 3,\n 1, 1, 1, 0, 2, 0)\n with self.tik_instance.else_scope():\n if self.in_size_h != 1:\n self.tik_instance.vadd((self.in_size_w - (\n in_size_w_num-1)*4)*16,\n ub_input[w_in_index*64],\n ub_input[w_in_index*64 +\n self.in_size_w *\n 16],\n ub_input[w_in_index*64],\n self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w *\n 2, 0)\n if self.in_size_w-(in_size_w_num-1)*4 > 1:\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64],\n self.in_size_w-(in_size_w_num-1)*4-1,\n 1, 1, 1, 0, 2, 0)\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[64], ub_input[0],\n in_size_w_num-1, 1, 1, 1, 0, 8, 0)\n else:\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n self.in_size_w*16, ub_input[0],\n ub_input[self.in_size_w*16],\n ub_input[0], self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w*2, 0)\n if self.in_size_w != 1:\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[16],\n ub_input[0], self.in_size_w-1,\n 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size+nc1_index)*16],\n ub_input[0], 0, 1, 2, 0, 0)\n with self.tik_instance.else_scope():\n with self.tik_instance.for_range(\n 0, nc1 - (block_dim - 1)*nc1_size) as nc1_index:\n ub_input = self.tik_instance.Tensor(\n \"float32\", (self.in_size_h, self.in_size_w,\n self.c_block_size),\n name=\"ub_output\", scope=tik.scope_ubuf)\n self.tik_instance.data_move(\n ub_input[0],\n self.grads_gm[(block_index*nc1_size + nc1_index) *\n self.in_size_h*self.in_size_w*16],\n 0, 1, self.in_size_h*self.in_size_w*2, 0, 0)\n if in_size_w_num > 1:\n with self.tik_instance.for_range(0, in_size_w_num) \\\n as w_in_index:\n with self.tik_instance.if_scope(\n w_in_index != in_size_w_num - 1):\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n MASK, ub_input[w_in_index*64],\n ub_input[w_in_index*64 + self.in_size_w*16],\n ub_input[w_in_index*64], self.in_size_h-1,\n 1, 1, 1, 0, self.in_size_w*2, 0)\n self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64], 3, 1, 1, 1, 0, 2, 0)\n with self.tik_instance.else_scope():\n if self.in_size_h != 1:\n self.tik_instance.vadd((self.in_size_w-(\n in_size_w_num-1)*4)*16,\n ub_input[w_in_index*64],\n ub_input[w_in_index*64 +\n self.in_size_w *\n 16],\n ub_input[w_in_index*64],\n self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w *\n 2, 0)\n if self.in_size_w-(in_size_w_num-1)*4 > 1:\n 
self.tik_instance.vadd(\n 16, ub_input[w_in_index*64],\n ub_input[w_in_index*64+16],\n ub_input[w_in_index*64],\n self.in_size_w-(in_size_w_num-1)*4-1,\n 1, 1, 1, 0, 2, 0)\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[64], ub_input[0],\n in_size_w_num-1, 1, 1, 1, 0, 8, 0)\n else:\n if self.in_size_h != 1:\n self.tik_instance.vadd(\n self.in_size_w*16, ub_input[0],\n ub_input[self.in_size_w*16],\n ub_input[0], self.in_size_h-1, 1,\n 1, 1, 0, self.in_size_w*2, 0)\n if self.in_size_w != 1:\n self.tik_instance.vadd(\n 16, ub_input[0], ub_input[16], ub_input[0],\n self.in_size_w-1, 1, 1, 1, 0, 2, 0)\n self.tik_instance.data_move(\n self.output_gm[(block_index*nc1_size+nc1_index)*16],\n ub_input[0], 0, 1, 2, 0, 0)",
"def identity_block(inputs, kernel_size, filters, stage, block,\n training):\n nb_filter1, nb_filter2, nb_filter3 = filters\n\n scope_name = 'conv'+str(stage)+block+'_branch'\n scope_name = 'bn'+str(stage)+block+'_branch'\n\n x = slim.conv2d(inputs,nb_filter1,[1,1],stride=[1,1],padding='VALID',activation_fn=None,\n trainable=training,scope=scope_name+'2a')\n x = slim.batch_norm(x,scope=scope_name+'2a',is_training=training)\n x = tf.nn.relu(x)\n\n\n x = slim.conv2d(x,nb_filter2,[kernel_size,kernel_size],stride=[1,1],padding='SAME',\n activation_fn=None,trainable=training,scope=scope_name+'2b')\n x = slim.batch_norm(x,scope=scope_name+'2b',is_training=training)\n x = tf.nn.relu(x)\n\n\n x = slim.conv2d(x,nb_filter3,[1,1],stride=[1,1],padding='VALID',\n activation_fn=None,trainable=training,scope=scope_name+'2c')\n x = slim.batch_norm(x,scope=scope_name+'2c',is_training=training)\n\n \n x = tf.add(x,inputs)\n x = tf.nn.relu(x,name='res'+str(stage)+block+\"_out\")\n return x",
"def discriminator_block(in_filters, out_filters):\n layers = [ nn.Conv2d(in_filters, out_filters, 4, stride=2, padding=1),\n nn.LeakyReLU(0.01)]\n return layers",
"def resnet50_base(freeze_blocks=[1,2,3], weight_regularizer=None, bias_regularizer=None):\n img_input = Input(shape=(None, None, 3))\n bn_axis = 3\n train1 = 1 not in freeze_blocks\n x = Conv2D(64, (7, 7), strides=(2, 2), padding='same', name='conv1', trainable=train1,\n kernel_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)(img_input)\n x = BatchNormalization(axis=bn_axis, name='bn_conv1', trainable=False)(x, training=False)\n x = Activation('relu')(x)\n x = MaxPooling2D((3, 3), strides=(2, 2))(x)\n\n train2 = 2 not in freeze_blocks\n x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1), trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='b', trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [64, 64, 256], stage=2, block='c', trainable=train2,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n\n train3 = 3 not in freeze_blocks\n x = conv_block(x, 3, [128, 128, 512], stage=3, block='a', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='b', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='c', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [128, 128, 512], stage=3, block='d', trainable=train3,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n\n train4 = 4 not in freeze_blocks\n x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f', trainable=train4,\n weight_regularizer=weight_regularizer, bias_regularizer=bias_regularizer)\n\n base_model = Model(img_input, x, name='resnet50')\n\n return base_model",
"def temp_ann(S_SHSTA_0, S_SHSTA_1, S_SHSTA_2, S_SHSTA_3, I_SHSTA_0, I_SHSTA_1,\n I_SHSTA_2, I_SHSTA_3, C_KSWCK_0, C_KSWCK_1, C_KSWCK_2, C_KSWCK_3):\n # Construct input array.\n x = np.array([S_SHSTA_0, S_SHSTA_1, S_SHSTA_2, S_SHSTA_3,\n I_SHSTA_0, I_SHSTA_1, I_SHSTA_2, I_SHSTA_3,\n C_KSWCK_0, C_KSWCK_1, C_KSWCK_2, C_KSWCK_3])\n # Pass through hidden layer 1.\n W1 = np.loadtxt('W1.txt')\n B1 = np.loadtxt('B1.txt')\n h1 = relu(np.dot(W1, x) + B1)\n # Pass through hidden layer 2.\n W2 = np.loadtxt('W2.txt')\n B2 = np.loadtxt('B2.txt')\n h2 = relu(np.dot(W2, h1) + B2)\n # Pass through hidden layer 3.\n W3 = np.loadtxt('W3.txt')\n B3 = np.loadtxt('B3.txt')\n h3 = relu(np.dot(W3, h2) + B3)\n # Pass through hidden layer 4.\n W4 = np.loadtxt('W4.txt')\n B4 = np.loadtxt('B4.txt')\n h4 = relu(np.dot(W4, h3) + B4)\n # Pass through output layer.\n WO = np.loadtxt('WO.txt')\n BO = np.loadtxt('BO.txt')\n y = relu(np.dot(WO, h4) + BO)\n print(y)\n # Return result.\n return y",
"def SSRNBlock(Yhat,c,Fo,scope='SSRNBlock',reuse=None):\n\n with tf.variable_scope(scope,reuse=reuse):\n conv_params = {\"filters\":c,\"kernel_size\":1,\"dilation_rate\":1,\"padding\":'same'} \n deconv_params = {\"filters\":c,\"kernel_size\":2,\"strides\":2,\"padding\":'same'} \n with tf.variable_scope('C_layer1', reuse): # conv 1\n L1 = conv1d(Yhat,**conv_params)\n with tf.variable_scope('HC_block1', reuse): # hc block 1\n L2_1 = highway_activation_conv(L1,kernel_size=3,padding='same',scope='HC1')\n L2_2 = highway_activation_conv(L2_1,kernel_size=3,dilation_rate=3,padding='same',scope='HC2')\n with tf.variable_scope('D_block1', reuse): # deconv block 1\n L3_1 = conv1d_transpose(L2_2,**deconv_params)\n L3_2 = highway_activation_conv(L3_1,kernel_size=3,padding='same',scope='HC1') \n L3_3 = highway_activation_conv(L3_2,kernel_size=3,dilation_rate=3,padding='same',scope='HC2') \n with tf.variable_scope('D_block2', reuse): # deconv block 2\n L4_1 = conv1d_transpose(L3_3,**deconv_params)\n L4_2 = highway_activation_conv(L4_1,kernel_size=3,padding='same',scope='HC1') \n L4_3 = highway_activation_conv(L4_2,kernel_size=3,dilation_rate=3,padding='same',scope='HC2')\n with tf.variable_scope('C_layer2', reuse): # conv 2\n conv_params[\"filters\"] = 2*c\n L5 = conv1d(L4_3,**conv_params) \n with tf.variable_scope('HC_block2', reuse): # hc block 2\n L6_1 = highway_activation_conv(L5,kernel_size=3,padding='same',scope='HC1') \n L6_2 = highway_activation_conv(L6_1,kernel_size=3,padding='same',scope='HC2') \n with tf.variable_scope('C_layer3', reuse): # conv block 3\n conv_params[\"filters\"] = Fo \n L7_1 = conv1d(L6_2,**conv_params) \n L7_2 = tf.nn.relu(conv1d(L7_1,**conv_params))\n L7_3 = tf.nn.relu(conv1d(L7_2,**conv_params))\n Zlogit = conv1d(L7_3,**conv_params)\n Zhat = tf.nn.sigmoid(Zlogit) # sigmoid output layer\n \n return Zlogit, Zhat",
"def classify_lenet5(batch_size=500, output_size=20):\n\n rng = numpy.random.RandomState(23455)\n\n\n # start-snippet-1\n x = T.matrix('x') # the data is presented as rasterized images\n ######################\n # BUILD ACTUAL MODEL #\n ######################\n print '... building the model'\n\n # Reshape matrix of rasterized images of shape (batch_size, 28 * 28)\n # to a 4D tensor, compatible with our LeNetConvPoolLayer\n # (28, 28) is the size of MNIST images.\n layer0_input = x.reshape((batch_size, 1, 37, 23))\n\n # Construct the first convolutional pooling layer:\n # filtering reduces the image size to (28-5+1 , 28-5+1) = (24, 24)\n # maxpooling reduces this further to (24/2, 24/2) = (12, 12)\n # 4D output tensor is thus of shape (batch_size, nkerns[0], 12, 12)\n layer0 = LeNetConvPoolLayer(\n rng,\n input=layer0_input,\n image_shape=(batch_size, 1, 37, 23),\n filter_shape=(20, 1, 4, 2),\n poolsize=(2, 2),\n )\n\n # layer1 = LeNetConvPoolLayer(\n # rng,\n # input=layer0.output,\n # image_shape=(batch_size, 20, 17, 11),\n # filter_shape=(50, 20, 4, 2),\n # poolsize=(2, 2),\n # )\n #\n # layer4 = LeNetConvPoolLayer(\n # rng,\n # input=layer1.output,\n # image_shape=(batch_size, 50, 7, 5),\n # filter_shape=(100, 50, 4, 2),\n # poolsize=(2, 2),\n # )\n\n layer2_input = layer0.output.flatten(2)\n\n # construct a fully-connected sigmoidal layer\n layer2 = HiddenLayer(\n rng,\n input=layer2_input,\n n_in=3740,\n n_out=output_size,\n activation=T.tanh,\n use_bias=True\n )\n\n # layer5 = HiddenLayer(\n # rng,\n # input=layer2.output,\n # n_in=200,\n # n_out=output_size,\n # activation=T.tanh,\n # use_bias=True\n # )\n\n # classify the values of the fully-connected sigmoidal layer\n layer3 = LogisticRegression(input=layer2.output, n_in=output_size, n_out=2)\n\n model_params = pickle.load(open('../model/cnn_dist_'+str(output_size)+'.pkl'))\n #\n layer0.W = theano.shared(\n value=numpy.array(\n model_params[2].get_value(True),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n\n layer0.b = theano.shared(\n value=numpy.array(\n model_params[3].get_value(True),\n dtype=theano.config.floatX\n ),\n name='b',\n borrow=True\n )\n\n # layer1.W = theano.shared(\n # value=numpy.array(\n # model_params[-4].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='W',\n # borrow=True\n # )\n #\n # layer1.b = theano.shared(\n # value=numpy.array(\n # model_params[-3].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='b',\n # borrow=True\n # )\n #\n # layer4.W = theano.shared(\n # value=numpy.array(\n # model_params[-6].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='W',\n # borrow=True\n # )\n #\n # layer4.b = theano.shared(\n # value=numpy.array(\n # model_params[-5].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='b',\n # borrow=True\n # )\n\n layer2.W = theano.shared(\n value=numpy.array(\n model_params[0].get_value(True),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n\n layer2.b = theano.shared(\n value=numpy.array(\n model_params[1].get_value(True),\n dtype=theano.config.floatX\n ),\n name='b',\n borrow=True\n )\n\n # layer5.W = theano.shared(\n # value=numpy.array(\n # model_params[-10].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='W',\n # borrow=True\n # )\n #\n # layer5.b = theano.shared(\n # value=numpy.array(\n # model_params[-9].get_value(True),\n # dtype=theano.config.floatX\n # ),\n # name='b',\n # borrow=True\n # )\n\n layer3.W = theano.shared(\n value=numpy.array(\n 
model_params[4].get_value(True),\n dtype=theano.config.floatX\n ),\n name='W',\n borrow=True\n )\n\n layer3.b = theano.shared(\n value=numpy.array(\n model_params[5].get_value(True),\n dtype=theano.config.floatX\n ),\n name='b',\n borrow=True\n )\n\n # params = layer3.params + layer5.params + layer2.params + layer4.params + layer1.params + layer0.params\n\n datasets = load_data(None)\n\n sets = ['train', 'dev', 'test']\n dimension = [20000, 20000, 20000]\n for k in range(3):\n if k == 0:\n classify_set_x, classify_set_y, classify_set_z, classify_set_m, classify_set_c, classify_set_b= datasets[k]\n else:\n classify_set_x, classify_set_y, classify_set_z= datasets[k]\n\n # compute number of minibatches for training, validation and testing\n n_classify_batches = classify_set_x.get_value(borrow=True).shape[0]\n n_classify_batches /= batch_size\n\n # allocate symbolic variables for the data\n index = T.lscalar() # index to a [mini]batch\n classify = theano.function(\n [index],\n layer2.output,\n givens={\n x: classify_set_x[index * batch_size: (index + 1) * batch_size],\n }\n )\n\n r = []\n\n for i in xrange(n_classify_batches):\n m = classify(i)\n r.extend(m)\n r = np.array(r)\n print r.shape\n r = np.append(r, np.reshape(classify_set_y.eval(),(dimension[k], 1)), 1)\n numpy.savetxt('../extractedInformation/cnn_dist_'+str(output_size)+'/'+sets[k]+'.csv', r, delimiter=\",\")",
"def __init__(self, ):\n super().__init__()\n channels = 3\n\n # Initial convolution block\n out_features = 64\n # encoder\n self.input = nn.Sequential(\n nn.ReflectionPad2d(channels),\n nn.Conv2d(3, out_features, (7, 7)),\n nn.InstanceNorm2d(out_features),\n nn.MaxPool2d(2),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Conv2d(out_features, out_features * 2, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n nn.MaxPool2d(2),\n nn.Conv2d(out_features * 2, out_features * 4, (3, 3), stride=(1, 1), padding=(1, 1)),\n nn.LeakyReLU(0.2, inplace=True),\n )\n\n self.in0 = nn.InstanceNorm2d(256)\n self.block0 = blocks()\n self.block1 = blocks()\n self.block2 = blocks()\n self.block3 = blocks()\n self.block4 = blocks()\n self.block5 = blocks()\n self.block6 = blocks()\n self.block7 = blocks()\n\n self.out = nn.Sequential(\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 4, out_features * 2, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features * 2),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.Upsample(scale_factor=2),\n nn.Conv2d(out_features * 2, out_features, 3, stride=1, padding=1),\n nn.InstanceNorm2d(out_features),\n nn.LeakyReLU(0.2, inplace=True),\n\n nn.ReflectionPad2d(channels),\n nn.Conv2d(out_features, channels, 7),\n nn.Tanh(),\n )",
"def cspdarknet53_tiny(input_data):\n input_data = common.convolutional(input_data, (3, 3, 3, 32), downsample=True)\n input_data = common.convolutional(input_data, (3, 3, 32, 64), downsample=True)\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 32, 32))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 32, 32))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 32, 64))\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 64, 128))\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 64, 64))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 64, 128))\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 128, 256))\n route = input_data\n input_data = common.route_group(input_data, 2, 1)\n input_data = common.convolutional(input_data, (3, 3, 128, 128))\n route_1 = input_data\n input_data = common.convolutional(input_data, (3, 3, 128, 128))\n input_data = tf.concat([input_data, route_1], -1)\n input_data = common.convolutional(input_data, (1, 1, 128, 256))\n route_1 = input_data\n input_data = tf.concat([route, input_data], -1)\n input_data = tf.keras.layers.MaxPool2D(2, 2, 'same')(input_data)\n\n input_data = common.convolutional(input_data, (3, 3, 512, 512))\n\n return route_1, input_data",
"def discriminator_block(in_filters, out_filters, bn=True):\n block = [ nn.Conv2d(in_filters, out_filters, 3, 2, 1),\n nn.LeakyReLU(0.2, inplace=True),\n nn.Dropout2d(0.25)]\n if bn:\n block.append(nn.BatchNorm2d(out_filters, 0.8))\n return block",
"def train_segmentation():\n\n start = time.time()\n\n model_base = load_pretrained(get_base(), PRETRAINED)\n cut, lr = model_meta[arch]\n m = to_gpu(Unet34(model_base))\n model = UnetModel(m)\n\n sz = 256\n bs = 64\n\n md = get_data(sz, bs)\n\n learn = ConvLearner(md, model)\n learn.opt_fn = optim.Adam()\n learn.crit = LossBinary(jaccard_weight=5)\n learn.metrics = [accuracy_thresh(0.5), dice, IoU]\n wd = 1e-7\n lr = 1e-2\n\n learn.freeze_to(1)\n learn.fit(lr, 1, wds=wd, cycle_len=1, use_clr=(5,8))\n learn.unfreeze() # unfreeze encoder\n learn.bn_freeze(True)\n\n lrs = np.array([lr/100, lr/10, lr])\n learn.fit(lrs/3, 2, wds=wd, cycle_len=2, use_clr=(20,8))\n\n learn.save('./models/weighted_unet_256_p1')\n\n sz = 384\n bs = 32\n\n md = get_data(sz, bs)\n learn.set_data(md)\n learn.unfreeze()\n learn.bn_freeze(True)\n\n learn.fit(lrs/5, 1, wds=wd, cycle_len=2, use_clr(10,8)) # first increase in image size with decreased bs\n learn.save('./models/weighted_unet_384_p1')\n\n sz = 512\n bs = 16\n\n md = get_data(sz, bs)\n learn.set_data(md)\n learn.unfreeze()\n learn.bn_freeze(True)\n\n learn.fit(lrs/10, 2, wds=wd, cycle_len=1, use_clr=(10,8), best_save_name='./models/weighted_unet_512_p1') # second increase in image size with further decreased bs\n\n sz = 768\n bs = 8\n\n md = get_data(sz, bs)\n learn.set_data(md)\n learn.unfreeze()\n learn.bn_freeze(True)\n\n learn.fit(lrs/50, 10, wds=5e-8, cycle_len=1, use_clr=(10,10), best_save_name='./models/weighted_unet_768_p1') # full image size with further decreased bs\n\n learn.crit = MixedLoss(10., 2.)\n learn.fit(lrs/50, 10, wds=5e-8, cycle_len=1, use_clr=(10,10), best_save_name='./models/weighted_unet_768_p2') # full image size with further decreased bs (final run)\n\n learn.save('./models/weighted_unet_768_final')\n\n print(f'Training finished in {time.time() - start) / 60 :.3} minutes.')",
"def discriminator_block(self, name):\n\n if self.fit_mask : \n \n inputs = Input(shape=(2+self.nb_classe_mask, self.image_row, self.image_column, self.image_depth), name='dis_input')\n else :\n # In:\n inputs = Input(shape=(2, self.image_row, self.image_column, self.image_depth), name='dis_input')\n\n # Input 64\n disnet = Conv3D(self.discriminator_kernel * 1, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_1')(inputs)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 1 : 32\n disnet = Conv3D(self.discriminator_kernel * 2, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_2')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 2 : 16\n disnet = Conv3D(self.discriminator_kernel * 4, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_3')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 3 : 8\n disnet = Conv3D(self.discriminator_kernel * 8, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_4')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 4 : 4\n disnet = Conv3D(self.discriminator_kernel * 16, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_5')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Decision : 2\n decision = Conv3D(1, 2, strides=1,\n use_bias=False,\n kernel_initializer='he_normal',\n data_format='channels_first',\n name='dis_decision')(disnet)\n\n decision = Reshape((1,))(decision)\n\n model = Model(inputs=[inputs], outputs=[decision], name=name)\n return model"
] | [
"0.6385848",
"0.6249216",
"0.6237148",
"0.61455214",
"0.60191464",
"0.5987267",
"0.59708",
"0.5924136",
"0.5916637",
"0.5866943",
"0.58421737",
"0.5832506",
"0.57936084",
"0.57807344",
"0.5772715",
"0.57676303",
"0.5728045",
"0.5723477",
"0.57012206",
"0.5695109",
"0.56872606",
"0.5671793",
"0.56614685",
"0.5649441",
"0.56433535",
"0.56138957",
"0.5603185",
"0.5600989",
"0.5597334",
"0.55945134"
] | 0.6513848 | 0 |
Load ImageNet pretrained model into MobileNetV2 backbone; this only happens when no checkpoint is loaded | def load_model(self):
if self.ckpt_flag:
LOG('Skip Loading Pre-trained Model......')
else:
if self.params.pre_trained_from is not None and os.path.exists(self.params.pre_trained_from):
try:
LOG('Loading Pre-trained Model at %s' % self.params.pre_trained_from)
pretrain = torch.load(self.params.pre_trained_from)
self.network.load_state_dict(pretrain)
LOG('Pre-trained Model Loaded!')
except:
WARNING('Cannot load pre-trained model. Start training......')
else:
                WARNING('Pre-trained model does not exist. Start training......')
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_model():\r\n model = MobileNetV2(weights=\"imagenet\")\r\n print(\"Model loaded\")\r\n return model",
"def load_model(self):\n self.pred_net.load((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.load((self.save_path / \"iqn_target_net\").absolute().as_posix())",
"def load_pretrained_model(model_name):\n if model_name==\"AlexNet\":\n print(\"Loading pretrained AlexNet Model\")\n model_ft = models.alexnet(pretrained=True)\n\n for param in model_ft.parameters():# Code for fixing the Conv Layer\n param.requires_grad = False\n num_ftrs = model_ft.classifier[6].in_features\n model_ft.classifier[6] = nn.Linear(num_ftrs, 100)\n elif model_name==\"ResNet18\":\n print(\"Loading pretrained ResNet18 Model\")\n model_ft = models.resnet18(pretrained=True)\n\n for param in model_ft.parameters(): # Code for fixing the Conv Layer\n param.requires_grad = False # During Training Conv layer does not learn.\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, 100)\n elif model_name==\"ResNet50\":\n print(\"Loading pretrained ResNet50 Model\")\n\n model_ft = models.resnet50(pretrained=True)\n for param in model_ft.parameters():# Code for fixing the Conv Layer\n param.requires_grad = False\n\n num_ftrs = model_ft.fc.in_features\n model_ft.fc = nn.Linear(num_ftrs, 100)\n elif model_name==\"DenseNet\":\n print(\"Loading pretrained DenseNet161 Model\")\n model_ft = models.densenet161(pretrained=True)\n\n for param in model_ft.parameters():# Code for fixing the Conv Layer\n param.requires_grad = False\n num_ftrs = model_ft.classifier.in_features\n model_ft.classifier = nn.Linear(num_ftrs, 100)\n\n if cfg.load_model_true:\n model_ft.load_state_dict(torch.load(cfg.load_model_path))\n\n return model_ft",
"def load_custom_model(model_name):\n if model_name==\"AlexNet\":\n print(\"Loading pretrained AlexNet Model\")\n model = models.alexnet()\n num_ftrs = model.classifier[6].in_features\n model.classifier[6] = nn.Linear(num_ftrs, 100)\n elif model_name==\"ResNet18\":\n print(\"Loading ResNet18 Model\")\n model = models.resnet18() #Load the pytorch. torchvision model\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, 100) #Set it to match the ImageNet-100 Classes.\n elif model_name==\"ResNet50\":\n print(\"Loading ResNet50 Model\")\n model = models.resnet50()\n num_ftrs = model.fc.in_features\n model.fc = nn.Linear(num_ftrs, 100) #ImageNet-100 has 100 classes.\n elif model_name==\"DenseNet\":\n print(\"Loading DenseNet161 Model\")\n model = models.densenet161()\n num_ftrs = model.classifier.in_features\n model.classifier = nn.Linear(num_ftrs, 100)\n elif model_name==\"MyNet\":\n print(\"Loading Pyramid Model\")\n model = pyramid_net.create_model() # Load the model I implemented.\n\n if cfg.load_model_true: # Load the model that was stopped during training.\n model.load_state_dict(torch.load(cfg.load_model_path))\n\n return model",
"def load_pretrained_model(self,model_dir):\n rnn_params = json.load(open(os.path.join(model_dir,\n \"./model.json\")))[\"rnn\"]\n\n logging.info(\"Loading model from: {}\".format(model_dir))\n self.create_training_model(model_dir = model_dir,\n **rnn_params)\n #从目录中读取神经网络参数\n self.set_model_from_file()",
"def load(self):\r\n # self.model.load_state_dict(torch.load(os.path.join(self.ckpt_dir, 'best_model_state_dict.pt')))\r\n if torch.cuda.is_available():\r\n self.model = torch.load(os.path.join(self.ckpt_dir, 'best_model_INN.pt'))\r\n else:\r\n self.model = torch.load(os.path.join(self.ckpt_dir, 'best_model_INN.pt'), map_location=torch.device('cpu'))",
"def load_onnx_model(self):\n print(\"Loading Rescue Detection Model\")\n\n self.rescue_model = cv2.dnn.readNetFromONNX(os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n rescue_cnn_model_path))\n\n self.rescue_model.setPreferableTarget(Rescue_PI.preferable_target)",
"def load_model(self, fname: str) -> None:\n checkpoint_data = torch.load(fname)\n\n # Load the models\n # P-Net\n model_import_path = checkpoint_data['p_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['p_net']['model_name'])\n self.p_net = mod()\n self.p_net.set_params(checkpoint_data['p_net'])\n # Q-Net\n model_import_path = checkpoint_data['q_net']['model_import_path']\n imp = importlib.import_module(model_import_path)\n mod = getattr(imp, checkpoint_data['q_net']['model_name'])\n self.q_net = mod()\n self.q_net.set_params(checkpoint_data['q_net'])",
"def load_model(self, gpus=1):\r\n\t\t\r\n\t\tif self.model != None:\r\n\t\t\treturn\r\n\r\n\t\t## build the model on the CPU if parallelism is targeted\r\n\t\tif isinstance(gpus, Sequence):\r\n\t\t\tif len(gpus) != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = \"/gpu:{:d}\".format(gpus[0])\r\n\t\t\t\tmultigpu = False\r\n\t\telse:\r\n\t\t\tif gpus != 1:\r\n\t\t\t\tdevice = \"/cpu:0\"\r\n\t\t\t\tmultigpu = True\r\n\t\t\telse:\r\n\t\t\t\tdevice = \"/gpu:{:d}\".format(gpus)\r\n\t\t\t\tmultigpu = False\r\n\r\n\r\n\t\tif self.__prop__(\"Resume\"):\r\n\t\t\tself.model = keras.models.load_model(\r\n\t\t\t\tself.__prop__(\"SnapshotDirectory\") + self.__prop__(\"Prefix\") + self.__prop__(\"Resume\") + '.h5\"')\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\telse: \r\n\t\t\t\r\n\t\t\twith tensorflow.device(device):\r\n\t\t\t\tif self.__prop__(\"Prefix\").startswith(\"i3PosNet_VGG16\"):\r\n\t\t\t\t\tself.model = i3PosNetVGG(\r\n\t\t\t\t\t\tinput_shape=self.__prop__(\"InputShape\"), \r\n\t\t\t\t\t\tout_number=self.__prop__(\"TargetShape\"),\r\n\t\t\t\t\t\tlayer_count=self.__prop__(\"LayerCount\"), \r\n\t\t\t\t\t\tfc_layer_count=self.__prop__(\"FCLayerCount\"), \r\n\t\t\t\t\t\tfc_reg=self.__prop__(\"FCReg\"), \r\n\t\t\t\t\t\tconv_reg=self.__prop__(\"ConvReg\"), \r\n\t\t\t\t\t\tshrinking=self.__prop__(\"Shrinking\"),\r\n\t\t\t\t\t\tpadding=self.__prop__(\"Padding\"))\r\n\t\t\t\telse:\r\n\t\t\t\t\tself.model = i3PosNet(image_shape, out = self.__prop__(\"TargetShape\"))\r\n\r\n\t\t\tself.single_model = self.model\r\n\t\t\tif multigpu:\r\n\t\t\t\tself.model = multi_gpu_model(self.model, gpus)\r\n\t\t\t\t\r\n\t\t\t# clear model\r\n\t\t\t# try:\r\n\t\t\t\t# del self.model\r\n\t\t\t# except:\r\n\t\t\t\t# pass\r\n\r\n\t\t\tif self.__prop__(\"Optimizer\") == \"SGD\":\r\n\t\t\t\toptimizer = SGD(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\tmomentum= self.__prop__(\"Momentum\"),\r\n\t\t\t\t\tnesterov=True)\r\n\t\t\telif self.__prop__(\"Optimizer\") == \"Adam\":\r\n\t\t\t\toptimizer = Adam(\r\n\t\t\t\t\tlr=self.__prop__(\"BaseLR\"),\r\n\t\t\t\t\tdecay=self.__prop__(\"Gamma\") if self.__prop__(\"LRPolicy\") == \"decay\" else 0.,\r\n\t\t\t\t\t# use defaults for these for now (b1 = 0.9, b2 = 0.999, e = 1e-8\r\n\t\t\t\t\tbeta_1=self.__prop__(\"Beta1\"),\r\n\t\t\t\t\tbeta_2=self.__prop__(\"Beta2\"),\r\n\t\t\t\t\tepsilon=self.__prop__(\"Epsilon\")\r\n\t\t\t\t\t)\r\n\t\t\t\r\n\t\t\tself.model.compile(loss='mean_squared_error', optimizer=optimizer)",
"def _load_model(self):\n self._load_scaler('scaler.save')\n self._load_encoder('encoder0.save', 0)\n self._load_encoder('encoder1.save', 1)\n self._load_neural_network('model.json', 'model.h5')\n return",
"def __load_Model(self):\r\n PrintsForUser.printProcess(\"[INFO] Loading network...\")\r\n \r\n self.__model = load_model(self.__model_path)\r\n self.__lb = pickle.loads(open(self.__labels_path, \"rb\").read())",
"def load_pretrained_model(self, load_from):\n print(\"loading model from %s\\n\" % (load_from))\n try:\n if self.use_cuda:\n pretrained_dict = torch.load(load_from)\n else:\n pretrained_dict = torch.load(load_from, map_location='cpu')\n\n model_dict = self.online_net.state_dict()\n pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}\n model_dict.update(pretrained_dict)\n self.online_net.load_state_dict(model_dict)\n print(\"The loaded parameters are:\")\n keys = [key for key in pretrained_dict]\n print(\", \".join(keys))\n print(\"--------------------------\")\n except Exception as e:\n print(\"Failed to load checkpoint...\")\n print(e)",
"def load_feature_extractor(model_spec, device):\n\n model_type = model_spec['name']\n model_weights_fp = model_spec['weights']\n\n if model_type == 'imagenet_swav':\n # or could load from hub model\n # model = torch.hub.load('facebookresearch/swav', 'resnet50')\n\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Identity()\n state_dict = torch.load(model_weights_fp, map_location=\"cpu\")\n\n state_dict = {k.replace(\"module.\", \"\"): v for k, v in state_dict.items()}\n for k in list(state_dict.keys()):\n if 'projection' in k or 'prototypes' in k:\n del state_dict[k]\n\n msg = model.load_state_dict(state_dict, strict=True)\n\n elif model_type == 'imagenet_moco_v2':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Identity()\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n\n # rename moco pre-trained keys\n state_dict = checkpoint['state_dict']\n for k in list(state_dict.keys()):\n # retain only encoder_q up to before the embedding layer\n if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):\n # remove prefix\n state_dict[k[len(\"module.encoder_q.\"):]] = state_dict[k]\n # delete renamed or unused k\n del state_dict[k]\n\n msg = model.load_state_dict(state_dict, strict=True)\n\n elif model_type == 'imagenet_supervised':\n model = models.resnet50(pretrained=True)\n\n elif model_type == 'random':\n model = models.resnet50(pretrained=False)\n\n elif model_type == 'inat2018_supervised':\n model = models.resnet50(pretrained=False)\n # This model was actually trained with 10000 classes for the fc layer\n # but only 8142 (the number in inat2018) were actually updated\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n msg = model.load_state_dict(checkpoint['state_dict'], strict=True)\n\n elif model_type == 'inat2021_mini_supervised':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n msg = model.load_state_dict(checkpoint['state_dict'], strict=True)\n\n elif model_type == 'inat2021_supervised':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n msg = model.load_state_dict(checkpoint['state_dict'], strict=True)\n\n elif model_type == 'inat2021_mini_supervised_from_scratch':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n state_dict = {k.replace(\"module.\", \"\"): v for k, v in checkpoint['state_dict'].items()}\n msg = model.load_state_dict(state_dict, strict=True)\n\n elif model_type == 'inat2021_supervised_from_scratch':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Linear(model.fc.in_features, 10000)\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n msg = model.load_state_dict(checkpoint['state_dict'], strict=True)\n\n elif model_type == 'inat2021_mini_moco_v2':\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Identity()\n checkpoint = torch.load(model_weights_fp, map_location=\"cpu\")\n\n # rename moco pre-trained keys\n state_dict = checkpoint['state_dict']\n for k in list(state_dict.keys()):\n # retain only encoder_q up to before the embedding layer\n if k.startswith('module.encoder_q') and not k.startswith('module.encoder_q.fc'):\n 
# remove prefix\n state_dict[k[len(\"module.encoder_q.\"):]] = state_dict[k]\n # delete renamed or unused k\n del state_dict[k]\n\n msg = model.load_state_dict(state_dict, strict=True)\n\n elif model_type == 'inat2021_mini_swav' or model_type == 'inat2021_mini_swav_1k':\n # or could load from hub model\n # model = torch.hub.load('facebookresearch/swav', 'resnet50')\n\n model = models.resnet50(pretrained=False)\n model.fc = torch.nn.Identity()\n state_dict = torch.load(model_weights_fp, map_location=\"cpu\")\n\n state_dict = {k.replace(\"module.\", \"\"): v for k, v in state_dict['state_dict'].items()}\n for k in list(state_dict.keys()):\n if 'projection' in k or 'prototypes' in k:\n del state_dict[k]\n\n msg = model.load_state_dict(state_dict, strict=True)\n\n else:\n raise ValueError(\"Unknown pytorch model: %s\" % model_type)\n\n\n # remove the final fully connected layer so the model only operates with post average pool features\n model = torch.nn.Sequential(*(list(model.children())[:-1]))\n model.to(device)\n model.eval()\n\n feature_extractor = PTResNet50FeatureExtractor(model, device)\n\n return feature_extractor",
"def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")",
"def load(self):\n if self.model is None:\n raise Exception(\"Build the model first.\")\n\n print(\"Loading model checkpoint {} ...\\n\".format(self.config[\"model\"][\"restore_model\"]))\n self.model.load_weights(self.config[\"model\"][\"restore_model\"])\n print(\"Model loaded!\")",
"def load(self):\n try:\n if self.model.is_cuda:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \"save_point.pth\")))\n else:\n self.model.load_state_dict(torch.load(os.path.join(self.save_path, \\\n \"save_point.pth\"), map_location=\"cpu\"))\n except:\n sys.exit(\"Unable to load previous model\")",
"def load_model(self):\n if os.stat('code/lr-model.pt').st_size == 0:\n return\n params = torch.load('code/lr-model.pt')\n self.set_params(params)",
"def load_trainer(self):\n super().load_trainer()\n\n logging.info(\"[Server #%d] Loading a pre-trained model.\", os.getpid())\n self.trainer.load_model()",
"def load_model(path):\n if os.path.isfile(path):\n print(\"=> loading checkpoint '{}'\".format(path))\n checkpoint = torch.load(path)\n\n # # size of the top layer\n # N = checkpoint['state_dict']['top_layer.bias'].size()\n #\n # # build skeleton of the model\n # sob = 'sobel.0.weight' in checkpoint['state_dict'].keys()\n # model = models.__dict__[checkpoint['arch']](sobel=sob, out=int(N[0]))\n #\n # # deal with a dataparallel table\n # def rename_key(key):\n # if not 'module' in key:\n # return key\n # return ''.join(key.split('.module'))\n #\n # checkpoint['state_dict'] = {rename_key(key): val\n # for key, val\n # in checkpoint['state_dict'].items()}\n #\n # # load weights\n # model.load_state_dict(checkpoint['state_dict'])\n # print(\"Loaded\")\n # else:\n # model = None\n # print(\"=> no checkpoint found at '{}'\".format(path))\n\n # net = models.__dict__['ResNet18'](low_dim=128)\n # net = models.__dict__['resnet18'](low_dim=128)\n\n net = models.__dict__['alexnet'](out=128)\n # net = models.__dict__['Alexnet_C'](out=args.low_dim)\n\n net = torch.nn.DataParallel(net, device_ids=range(torch.cuda.device_count()))\n net.load_state_dict(checkpoint['net'])\n\n return net",
"def load_pretrained_net_weights(net, ckpt_path):\n print(\"Loading Model: \", ckpt_path)\n print('')\n\n net.load_weights(ckpt_path).expect_partial()",
"def load_checkpoint(self, label):\n model_dir = os.path.join(\n config.results_dir, config.experiment_name, 'checkpoints')\n model_file = os.path.join(model_dir, '{}_net.pth.tar'.format(label))\n\n print(\"Loading model from {}\".format(model_file))\n model_dict = torch.load(model_file)\n\n self.use_cuda = model_dict['use_cuda']\n if self.use_cuda:\n self.net.cuda()\n\n self.net.load_state_dict(model_dict['net_state_dict'])",
"def load_checkpoint(self, model):\n print(f\"load model {self.save_model_path}\")\n model.load_state_dict(torch.load(self.save_model_path))",
"def load_trained_net(mal):\n model_root = os.path.join(os.getcwd(), 'data', 'models')\n model = load_model(os.path.join(model_root, 'model_' + mal + '.h5'))\n\n return model",
"def load_model(self, ckpt_fn):\n checkpoint = torch.load(ckpt_fn)\n self.net_.load_state_dict(checkpoint[\"model\"])\n self.optimizer_.load_state_dict(checkpoint[\"optimizer\"])\n self.epoch_ = checkpoint[\"epoch\"]\n self.global_step_ = checkpoint[\"global_step\"]\n self.model_samples_ = deque(checkpoint[\"model_samples\"])\n self.sampler.load_state_dict(checkpoint[\"sampler_state\"])\n self.ais_loss.load_state_dict(checkpoint[\"ais_state\"])\n self.replay_prob = checkpoint[\"replay_prob\"]\n self.max_replay = checkpoint[\"max_replay\"]",
"def load_model(model_name):\n if hasattr(torchvision.models, model_name):\n with torch.no_grad():\n if model_name.startswith(\"inception\"):\n height = width = 299\n mean = [0.5, 0.5, 0.5]\n std = [0.5, 0.5, 0.5]\n else:\n height = width = 224\n mean = [0.485, 0.456, 0.406]\n std = [0.229, 0.224, 0.225]\n input_shape = [1, 3, height, width]\n input_data = torch.randn(input_shape).float()\n for channel in range(3):\n input_data[:, channel] -= mean[channel]\n input_data[:, channel] /= std[channel]\n model = getattr(torchvision.models, model_name)(pretrained=True)\n model = model.float().eval()\n return model, [input_data]\n try:\n import pretrainedmodels\n if hasattr(pretrainedmodels, model_name):\n return load_pretrainedmodels(model_name)\n except ModuleNotFoundError:\n raise ModuleNotFoundError(\"Please install pretrainedmodels.pytorch\")\n raise RuntimeError(\"Model not supported\")",
"def load_bytes_subnetwork_pretrained_weights(self, model):\n print(\"ToImplement\")",
"def load_model(model, path):\n\tmodel.load_state_dict(torch.load(path))\n\tprint(\"pre-trained model loaded from {}\".format(path))",
"def load_model(self):\r\n try:\r\n self.model = CRNN_STN(self.crnn_cfg())\r\n self.model.load_weights(config.CRNN_Model_Path)\r\n except:\r\n print('Error in method {0} in module {1}'.format('load_model', 'crnn_bridge.py'))",
"def checkpoint_load(checkpoint_path, gpu):\n model_info = torch.load(checkpoint_path)\n model = models.vgg19(pretrained=True)\n for param in model.parameters():\n param.requires_grad = False\n \n model.class_to_idx = model_info['class_to_idx']\n\n model = classifier(model)\n model.load_state_dict(model_info[\"model_state_dict\"])\n return model, model.class_to_idx",
"def load_model(self, model_type='keras', create_dir=True):\n if os.path.isdir(self.pred_dir) and create_dir is True:\n print('Pictures already predicted with that model')\n else:\n if create_dir is True:\n os.makedirs(self.pred_dir)\n print('created new directory ', self.pred_dir)\n\n if model_type == 'keras':\n # def predict(self, model_dir, img_dir, output_dir, batch_size=4, train_dir=None):\n\n img_paths = glob.glob(os.path.join(self.frame_dir, '*'))\n img_gen = ImageGenerator(img_paths, batch_size=3, shuffle=False, normalize='std_norm', augmentation=False)\n\n self.model = load_model(os.path.join(self.model_dir, 'model.h5'), custom_objects={'f1_loss': f1_loss})\n\n if model_type == 'tensorflow':\n import tensorflow as tf\n\n def load_image(path, dtype=np.float32):\n data = np.array(cv2.imread(path), dtype)\n\n # normalization\n data -= np.amin(data)\n data /= np.amax(data)\n return data\n\n tensor_node_op = 'ArgMax:0' # 'div_1:0' # 'truediv_21:0'\n tensor_node_x = 'Placeholder:0'\n tensor_node_prob = 'Placeholder_2:0'\n meta_file = '/model.cpkt.meta'\n\n with tf.Session() as sess:\n\n writer = tf.summary.FileWriter(\"tensorflow\") # setup writer object for tensorboard\n saver = tf.train.import_meta_graph(self.model_dir + meta_file)\n saver.restore(sess, tf.train.latest_checkpoint(self.model_dir))\n print(\"Model restored from file: %s\" % self.model_dir)\n\n # get the graph in the current thread\n graph = tf.get_default_graph()\n # node_names = [tensor.name for tensor in graph.as_graph_def().node]\n # print(node_names)\n\n # access the input key words for feed_dict\n xk = graph.get_tensor_by_name(tensor_node_x)\n keep_prob = graph.get_tensor_by_name(tensor_node_prob)\n\n # Now, access the operation that you want to run.\n restored_op = graph.get_tensor_by_name(tensor_node_op)\n\n # loop through files and save predictions\n for image in os.listdir(self.frame_dir):\n # load image\n x = load_image(os.path.join(self.frame_dir, image))\n\n # run prediction\n prediction = sess.run(restored_op, feed_dict={xk: [x], keep_prob: 1.})[0]\n\n # transform prediction to black and white and store as png\n pred_processed = (prediction * 255).astype(np.uint8)\n self.predictions.append(pred_processed)\n\n # create image file in prediction folder\n image_name = self.model_name + '__' + os.path.splitext(image)[0] + '.png'\n cv2.imwrite(os.path.join(self.pred_dir, image_name), pred_processed)\n\n writer.add_graph(graph) # add graph to tensorboard\n\n print('model loaded')"
] | [
"0.8060752",
"0.7502474",
"0.70105195",
"0.70047826",
"0.69656646",
"0.69574654",
"0.6884236",
"0.6866858",
"0.68493474",
"0.6838054",
"0.68307084",
"0.6810942",
"0.6784349",
"0.6778722",
"0.6778722",
"0.6765091",
"0.6763571",
"0.67604506",
"0.67532",
"0.6735666",
"0.67249376",
"0.67236537",
"0.67173094",
"0.6714373",
"0.66629016",
"0.6654085",
"0.66382617",
"0.6637898",
"0.66286874",
"0.6588637"
] | 0.7511762 | 1 |
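
The record above captures a common initialization pattern: skip ImageNet weights when a checkpoint will be restored, otherwise try to load them and fall back to training from scratch. A minimal standalone sketch of that pattern follows, assuming a torchvision MobileNetV2 backbone and a hypothetical weights_path argument; these names are illustrative and not taken from the record.

import os
import torch
from torchvision.models import mobilenet_v2

def load_backbone_weights(network, weights_path, resume_from_checkpoint=False):
    """Load ImageNet-pretrained weights unless a checkpoint will be restored instead."""
    if resume_from_checkpoint:
        print('Skipping pre-trained weights; the checkpoint takes precedence.')
        return network
    if weights_path is not None and os.path.exists(weights_path):
        try:
            state_dict = torch.load(weights_path, map_location='cpu')
            network.load_state_dict(state_dict)
            print('Pre-trained weights loaded from %s' % weights_path)
        except Exception:  # broad fallback, mirroring the record's tolerant behaviour
            print('Cannot load pre-trained weights; training starts from scratch.')
    else:
        print('Pre-trained weights file does not exist; training starts from scratch.')
    return network

# Usage sketch (hypothetical path): load_backbone_weights(mobilenet_v2(), 'mobilenet_v2_imagenet.pth')
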
Plot train/val loss curve | def plot_curve(self):
x1 = np.arange(self.init_epoch, self.params.num_epoch+1, dtype=np.int).tolist()
x2 = np.linspace(self.init_epoch, self.epoch,
num=(self.epoch-self.init_epoch)//self.params.val_every+1, dtype=np.int64)
plt.plot(x1, self.train_loss, label='train_loss')
plt.plot(x2, self.val_loss, label='val_loss')
plt.legend(loc='best')
plt.title('Train/Val loss')
plt.grid()
plt.xlabel('Epoch')
plt.ylabel('Loss')
plt.show() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_loss():\n df = pd.read_csv('data/loss.csv', encoding='utf-8')\n loss = df['loss'].values\n val_loss = df['val_loss'].values\n x = [i for i in range(1, len(loss) + 1)]\n\n plt.plot(x, loss, label='Train loss')\n plt.plot(x, val_loss, label='Val loss')\n\n plt.xlabel('Epochs')\n plt.ylabel('Contrastive Loss')\n plt.title('Train and test loss')\n plt.grid(True)\n plt.legend(shadow=True, fontsize='x-large')\n\n plt.show()",
"def plot_loss_curve(num_epochs, losses):\n plt.xlabel('Epochs')\n plt.ylabel('Loss') \n plt.title('Loss Curve') \n plt.plot(range(num_epochs), losses)\n plt.show()",
"def plot_loss(x, loss_train, loss_valid, title):\n plt.figure()\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Loss\")\n plt.title(title)\n plt.plot(x, loss_train, '-b', label='Training')\n plt.plot(x, loss_valid, '-r', linestyle=(0, (1, 2)), label='Validation')\n plt.legend([\"Training\", \"Validation\"], loc=\"upper right\", frameon=False)\n plt.yscale(\"log\")\n # plt.show()\n plt.savefig('{}.png'.format(title))",
"def plot_loss(stats):\r\n plt.plot(stats['train_loss_ind'], stats['train_loss'], label='Training loss')\r\n plt.plot(stats['val_loss_ind'], stats['val_loss'], label='Validation loss')\r\n plt.legend()\r\n plt.xlabel('Number of iterations')\r\n plt.ylabel('Loss')\r\n plt.show()",
"def loss_plot(train_loss, val_loss, filename):\n\tplt.plot(train_loss)\n\tplt.plot(val_loss)\n\tplt.ylabel('Loss')\n\tplt.xlabel('Epochs')\n\tplt.legend(['Train', 'Val'], loc='upper right')\n\tplt.savefig(filename)\n\tplt.close()",
"def plot_loss(self):\n plt.plot(self.loss[10:], 'g+', label = \"loss\")\n plt.plot(self.loss[10:], 'r--', label = \"loss (smooth)\")\n plt.title(f\"Graph of loss after {len(self.loss)} steps of Gradient Descent.\")\n plt.xlabel('steps')\n plt.ylabel('loss')\n plt.legend()\n plt.show()",
"def plot_loss(path, current_epoch, train_loss, test_loss):\n plotname = os.path.join(path, \"training_loss_curve.png\")\n fig = plt.figure()\n plt.axes().set_facecolor(\"#fbc9bc\")\n plt.plot(\n range(1, current_epoch + 1), train_loss, color=\"#ff6050\", label=\"Training Loss\"\n )\n plt.plot(range(1, current_epoch + 1), test_loss, color=\"#19214e\", label=\"Test Loss\")\n plt.xlabel(\"Epoch Count\")\n plt.ylabel(\"Model Loss\")\n plt.legend()\n fig.savefig(plotname, bbox_inches=\"tight\")\n plt.close()",
"def plotLoss():\n # ssr\n ssr = np.log(gradientDescent(X, y)[1])\n # number of iterations \n iterations = np.log(np.arange(1, len(ssr) + 1, 1))\n # plot reduction of ssr\n plt.plot(iterations, ssr)\n # xlabel\n plt.xlabel(\"Iteration\")\n # ylabel\n plt.ylabel(\"SSR\")\n # title\n plt.title(\"Reduction of SSR by number of Iterations\")\n # show plot \n plt.show()",
"def plot_loss(training_errors, validation_errors):\n plt.xscale('Log')\n plt.xlabel('Epochs')\n plt.ylabel('Mean Actual Error')\n plt.plot(training_errors, label = \"Training Error\", \\\n color = 'blue')\n plt.plot(validation_errors, label = \"Validation Error\", \\\n color = 'red')\n plt.legend()\n # Saves plot automatically, adjust filename as needed.\n plt.savefig('reservoir_05whdens_100h_7spec_test_3.png')\n plt.show()",
"def plot_loss (history):\n \n history_dict = history.history\n loss_values = history_dict['loss']\n val_loss_values = history_dict['val_loss']\n epochs = range(1, len(loss_values) + 1)\n\n plt.plot (epochs, loss_values, 'bo', label='Training loss')\n plt.plot (epochs, val_loss_values, 'b', label=\"validation loss\")\n plt.title('Training and validation loss')\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n plt.show()",
"def plot_loss(model_fit, save_folder): \n train_loss = model_fit.history['loss']\n val_loss = model_fit.history['val_loss']\n epoch_axis = np.arange(1, len(train_loss) + 1)\n plt.title('Train vs Validation Loss')\n plt.plot(epoch_axis, train_loss, 'b', label='Train Loss')\n plt.plot(epoch_axis, val_loss,'r', label='Val Loss')\n plt.xlim([1, len(train_loss)])\n plt.xticks(np.arange(min(epoch_axis), max(epoch_axis) + 1, round((len(train_loss) / 10) + 0.5)))\n plt.legend(loc='upper right')\n plt.ylabel('Loss')\n plt.xlabel('Epochs')\n plt.savefig(save_folder + '/loss.png')\n plt.show()\n plt.close()",
"def plot_loss_curves(results):\n loss = results[\"train_loss\"]\n test_loss = results[\"test_loss\"]\n\n accuracy = results[\"train_acc\"]\n test_accuracy = results[\"test_acc\"]\n\n epochs = range(len(results[\"train_loss\"]))\n\n plt.figure(figsize=(15, 7))\n\n # Plot loss\n plt.subplot(1, 2, 1)\n plt.plot(epochs, loss, label=\"train_loss\")\n plt.plot(epochs, test_loss, label=\"test_loss\")\n plt.title(\"Loss\")\n plt.xlabel(\"Epochs\")\n plt.legend()\n\n # Plot accuracy\n plt.subplot(1, 2, 2)\n plt.plot(epochs, accuracy, label=\"train_accuracy\")\n plt.plot(epochs, test_accuracy, label=\"test_accuracy\")\n plt.title(\"Accuracy\")\n plt.xlabel(\"Epochs\")\n plt.legend()",
"def plot_errors(loss_train, loss_val, jet):\n plt.plot(list(range(len(loss_train))), loss_train, 'g', label='Training loss')\n plt.plot(list(range(len(loss_val))), loss_val, 'b', label='Validation loss')\n plt.title('Training and Validation loss for jet: {jet}'.format(jet=jet))\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.legend()\n plt.show()",
"def learning_viz(self) :\n self.train\n history = self.history\n plot_loss(history)",
"def plot_training_curve(path):\n import matplotlib.pyplot as plt\n train_err = np.loadtxt(\"{}_train_err.csv\".format(path))\n val_err = np.loadtxt(\"{}_val_err.csv\".format(path))\n train_loss = np.loadtxt(\"{}_train_loss.csv\".format(path))\n val_loss = np.loadtxt(\"{}_val_loss.csv\".format(path))\n plt.title(\"Train vs Validation Error\")\n n = len(train_err) # number of epochs\n plt.plot(range(1,n+1), train_err, label=\"Train\")\n plt.plot(range(1,n+1), val_err, label=\"Validation\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Error\")\n plt.legend(loc='best')\n plt.show()\n plt.title(\"Train vs Validation Loss\")\n plt.plot(range(1,n+1), train_loss, label=\"Train\")\n plt.plot(range(1,n+1), val_loss, label=\"Validation\")\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Loss\")\n plt.legend(loc='best')\n plt.show()",
"def plot_cost(c_v, c_t, save_plots_path):\n\n plt.figure()\n plt.plot(c_v, label='Validation loss')\n plt.plot(c_t, label='Training loss')\n plt.legend()\n title = 'Loss per epoch'\n plt.title(title)\n plt.xlabel(\"Epoch\")\n plt.ylabel(\"Loss\")\n plt.savefig(save_plots_path + \"swag_loss_plot.png\")",
"def plot_losses(train, test, mode):\n\tplt.figure()\n\tplt.plot(range(len(train)), train, 'r', label='Training')\n\tplt.plot(range(len(test)), test, 'b', label='Testing')\n\tplt.title('MSE Loss (batch type: ' + mode + ')')\n\tplt.legend()\n\tplt.show()",
"def plot_model_performance(train_loss, train_acc, train_val_loss, train_val_acc):\n \n green = '#72C29B'\n orange = '#FFA577'\n \n with plt.xkcd():\n # plot model loss\n fig, ax1 = plt.subplots()\n ax1.plot(range(1, len(train_loss) + 1), train_loss, green, linewidth=5,\n label='training')\n ax1.plot(range(1, len(train_val_loss) + 1), train_val_loss, orange,\n linewidth=5, label='validation')\n ax1.set_xlabel('# epoch')\n ax1.set_ylabel('loss')\n ax1.tick_params('y')\n ax1.legend(loc='upper right', shadow=False)\n # plot model accuracy\n fig, ax2 = plt.subplots()\n ax2.plot(range(1, len(train_acc) + 1), train_acc, green, linewidth=5,\n label='training')\n ax2.plot(range(1, len(train_val_acc) + 1), train_val_acc, orange,\n linewidth=5, label='validation')\n ax2.set_xlabel('# epoch')\n ax2.set_ylabel('accuracy')\n ax2.tick_params('y')\n ax2.legend(loc='lower right', shadow=False)\n plt.show()",
"def plot_loss_acc(name,score):\n plt.title(name)\n plt.xlabel('Epoch Number')\n plt.ylabel(name.split(sep=' ')[1])\n plt.plot(score)\n plt.savefig(\"graphs/\"+name+\".png\")",
"def plot_loss(G_losses, D_losses):\n plt.figure(figsize=(10,5))\n plt.title(\"Generator and Discriminator Loss During Training\")\n plt.plot(G_losses,label=\"G\")\n plt.plot(D_losses,label=\"D\")\n plt.xlabel(\"iterations\")\n plt.ylabel(\"Loss\")\n plt.legend()\n plt.show()",
"def plot_train_history(self):\n plt.figure()\n plt.xlabel('Epochs')\n plt.ylabel('Loss')\n plt.plot(self.train_history.history['loss'])\n plt.plot(self.train_history.history['val_loss'])\n plt.legend(['Training', 'Validation'])\n\n plt.show()",
"def plot_train_val_loss(path_to_file, train_filename, val_filename):\n path = '../'\n labels = ['gen_total_loss', 'gen_loss', 'l1_loss', 'disc_total_loss', 'disc_gen_loss', 'disc_real_loss']\n with open(path_to_file + train_filename + '.csv', newline='') as f:\n reader = csv.reader(f)\n train_data = np.array(list(reader))\n with open(path_to_file + val_filename + '.csv', newline='') as f:\n reader = csv.reader(f)\n val_data = np.array(list(reader))\n\n if train_data.shape == val_data.shape:\n # change label number for the type of loss that should be plotted\n label = 0\n print(train_data.shape, val_data.shape)\n epoch_count = range(1, train_data.shape[0] + 1)\n plt.figure()\n plt.plot(epoch_count, val_data[:len(epoch_count), label].astype('float32'))\n plt.plot(epoch_count, train_data[:len(epoch_count), label].astype('float32'))\n plt.legend(['validation', 'train'])\n plt.xlabel('Epoch')\n plt.ylabel(labels[label])\n plt.show()",
"def plot_loss(history, name):\n\n plt.plot(history.history['loss'])\n plt.plot(history.history['val_loss'])\n plt.title('model loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train', 'val'], loc='upper left')\n # plt.show()\n plt.savefig(name, format=\"png\")",
"def plot_loss_vs_epoch(history, var_train, var_val, show=False):\n plt.figure(figsize=(10, 8))\n plt.grid(True)\n plt.plot(history.history['loss']/var_train, marker=\"o\")\n plt.plot(history.history['val_loss']/var_val, marker=\"o\")\n plt.title('Model Loss')\n plt.ylabel('Loss (Normalised to variance of dataset)')\n plt.xlabel('Epoch')\n plt.legend(['Train', 'Validation'])\n # plt.ylim(bottom=0)\n filename = \"img/\"\n filename += datetime.now().strftime(\"%y%m%d_%H%M\")\n filename += \"_model_loss.png\"\n plt.savefig(filename)\n\n if show:\n plt.show()",
"def train_visualization(output_path): \n log_path = output_path + 'output.log'\n Train_Cost, Valid_Cost, Test_Cost, Train_Acc, Valid_Acc, Test_Acc = log_reader(log_path)\n n_epoch = len(Train_Cost)\n\n x1 = range(n_epoch)\n x2 = range(n_epoch)\n y1 = Train_Cost\n y2 = Valid_Cost\n y3 = Test_Cost\n y4 = Train_Acc\n y5 = Valid_Acc\n y6 = Test_Acc\n plt.subplot(2, 1, 1)\n plt.plot(x1, y1, label=\"Train_Cost\", linewidth=2)\n plt.plot(x1, y2, label=\"Valid_Cost\", linewidth=2)\n plt.plot(x1, y3, label=\"Test_Cost\", linewidth=2)\n\n plt.title('binary cross entropy vs. epoches')\n plt.ylabel('binary cross entropy')\n plt.legend(loc='best')\n plt.subplot(2, 1, 2)\n plt.plot(x2, y4, label=\"Train_Acc\", linewidth=2)\n plt.plot(x2, y5, label=\"Valid_Acc\", linewidth=2)\n plt.plot(x2, y6, label=\"Test_Acc\", linewidth=2)\n plt.xlabel('Accuracy@20 vs. epoches')\n plt.ylabel('Accuracy@20')\n plt.legend(loc='best')\n plt.savefig(output_path + 'loss_fig.png')\n # plt.show()",
"def Plot_loss(history_object): \n ### print the keys contained in the history object\n print(history_object.history.keys())\n print(history_object.history['loss'])\n print(history_object.history['val_loss'])\n\n ### plot the training and validation loss for each epoch\n plt.plot(history_object.history['loss'])\n plt.plot(history_object.history['val_loss'])\n plt.title('model mean squared error loss')\n plt.ylabel('mean squared error loss')\n plt.xlabel('epoch')\n plt.legend(['training set', 'validation set'], loc='upper right')\n plt.show()",
"def cross_validation_visualization(lambdas, loss_train, loss_test):\n plt.semilogx(lambdas, loss_train, marker=\".\", color='b', label='train error')\n plt.semilogx(lambdas, loss_test, marker=\".\", color='r', label='test error')\n plt.xlabel(\"lambda\")\n plt.ylabel(\"rmse\")\n plt.title(\"cross validation\")\n plt.legend(loc=2)\n plt.grid(True)\n plt.savefig(\"cross_validation_mse\")",
"def graph(trainingLoss, validationLoss = None):\n style.use('fivethirtyeight')\n fig = plt.figure()\n ax1 = fig.add_subplot(1, 1, 1)\n ax1.cla()\n if validationLoss is not None:\n ax1.plot(np.array(range(len(trainingLoss))) + 1, validationLoss, label=\"Validation loss\")\n# print('Epoch: {} \\tTraining Loss: {:.6f} \\tValidation Loss: {:.6f}'.format(len(trainingLoss), trainingLoss[-1], validationLoss[-1]))\n# else:\n# print('Epoch: {} \\tTraining Loss: {:.6f}'.format(len(trainingLoss), trainingLoss[-1]))\n ax1.plot(np.array(range(len(trainingLoss))) + 1, trainingLoss, label=\"Training loss\")\n plt.legend(loc='best')\n plt.tight_layout()\n plt.show()",
"def plot_learning_curve(model, X_train, X_test, y_train, y_test):\n\n m, train_scores, valid_scores = learning_curve(estimator = model, \n X = X_train, y = y_train.ravel(), train_sizes = np.linspace(0.1,1.0, 80))\n\n train_cv_err = np.mean(train_scores, axis=1)\n test_cv_err = np.mean(valid_scores, axis=1)\n tr, = plt.plot(m, train_cv_err)\n ts, = plt.plot(m, test_cv_err)\n plt.legend((tr, ts), ('training error', 'test error'), loc = 'best')\n plt.title('Learning Curve')\n plt.xlabel('Data Points')\n plt.ylabel('Accuracy')",
"def plot_loss(self):\n train_elbo_range = range(len(self.train_elbo_hist))\n val_elbo_range = range(len(self.val_elbo_hist))\n train_loss_range = range(len(self.train_loss_hist))\n val_loss_range = range(len(self.val_loss_hist))\n\n fig, ax = plt.subplots(2, 2)\n ax[0][0].plot(train_elbo_range, self.train_elbo_hist)\n ax[0][0].title.set_text(\"Train ELBO\")\n ax[0][1].plot(val_elbo_range, self.val_elbo_hist)\n ax[0][1].title.set_text(\"Val ELBO\")\n ax[1][0].plot(train_loss_range, self.train_loss_hist)\n ax[1][0].title.set_text(\"Train MSE\")\n ax[1][1].plot(val_loss_range, self.val_loss_hist)\n ax[1][1].title.set_text(\"Val MSE\")\n plt.tight_layout()\n plt.show()"
] | [
"0.8248865",
"0.80044657",
"0.7913672",
"0.7894027",
"0.7888984",
"0.7629496",
"0.76258636",
"0.7564437",
"0.75305593",
"0.7529871",
"0.7517523",
"0.75074285",
"0.7466194",
"0.74021125",
"0.73700374",
"0.7361953",
"0.7360696",
"0.7335114",
"0.7294053",
"0.727845",
"0.72673714",
"0.72647315",
"0.7260771",
"0.7229311",
"0.7222473",
"0.7200537",
"0.7198146",
"0.71932286",
"0.71724343",
"0.71681505"
] | 0.8301858 | 0 |
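
The plot_curve document above interleaves a per-epoch training curve with a sparser validation curve. A self-contained sketch of the same idea, assuming plain lists of loss values where val_loss was recorded once every val_every epochs starting at init_epoch (argument names are assumptions, not the record's attributes):

import numpy as np
import matplotlib.pyplot as plt

def plot_train_val_loss(train_loss, val_loss, init_epoch, num_epoch, val_every):
    """Plot a per-epoch training-loss curve against a validation curve sampled every val_every epochs."""
    x_train = np.arange(init_epoch, num_epoch + 1)           # one point per training epoch
    x_val = np.arange(init_epoch, num_epoch + 1, val_every)  # one point per validation run
    plt.plot(x_train, train_loss, label='train_loss')
    plt.plot(x_val, val_loss, label='val_loss')
    plt.legend(loc='best')
    plt.title('Train/Val loss')
    plt.grid()
    plt.xlabel('Epoch')
    plt.ylabel('Loss')
    plt.show()
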
Start a stopped node. | def ex_start_node(self, node):
# NOTE: This method is here for backward compatibility reasons after
# this method was promoted to be part of the standard compute API in
# Libcloud v2.7.0
return self.start_node(node=node) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def start_node(self, **kwargs):\n # project_name, node_name\n\n try:\n if kwargs['project_name'] in self.data:\n project_name = kwargs['project_name']\n project_id = self.data[project_name]['project_id']\n if kwargs['node_name'] in self.data[project_name]['nodes']:\n node_name = kwargs['node_name']\n node_id = self.data[project_name]['nodes'][node_name]['node_id']\n resp = self.post_to_server('projects/{}/nodes/{}/start'.format(project_id, node_id),{})\n print('Node \\'{}\\' started.'.format(node_name))\n self.data[project_name]['nodes'][node_name]['status'] = \"running\"\n except:\n traceback_print_exc()",
"def start_instance(self):\n instance_id = self._choose_among_stopped_instances()\n\n # Cancel\n if not instance_id:\n print 'Operation cancelled'\n return\n\n print '# Starting the instance \"%s\"' % instance_id\n if self.compute.start_instance(instance_id):\n print 'The instance has been started'\n else:\n print 'The instance could not be started'",
"def startNode(klass):\n try:\n ws = klass('ws://localhost:8080/ws')\n ws.daemon = False\n ws.connect()\n except:\n ws.close()",
"def __init__(self):\n self.start = Node('-1')",
"def start(self) -> None:\n self.stopping.clear()\n self.thread = threading.Thread(target=self._run, daemon=True, name=self.thread_name)\n self.thread.start()",
"def start():\n Networker.stop()\n Networker.Instance = Networker()",
"def stopped(self, exitCode):\r\n self._protocol = None\r\n\r\n if self._call:\r\n self._call.cancel()\r\n\r\n if exitCode:\r\n log.msg('Node ({0}) terminated with exit code: '\r\n '{1}'.format(self._name, exitCode))\r\n\r\n if self._owner:\r\n self._owner.unregisterNode(self)\r\n self._owner = None",
"def start(self):\n return self.reset(\n starting=1,\n stopped=0,\n )",
"def start(self):\n self._state = 'Started'",
"def __init__(self, start_node):\n self.start_node = start_node",
"def start_stopped_nodes(genesis_file: str,\n ssh_config_file: str = DEFAULT_CHAOS_SSH_CONFIG_FILE) -> bool:\n output_dir = get_chaos_temp_dir()\n stopped_primary_dict = {}\n stopped_nodes_file = \"{}/stopped_nodes\".format(output_dir)\n try:\n with open(stopped_nodes_file, 'r') as stopped_primary:\n stopped_primary_dict = json.load(stopped_primary)\n except FileNotFoundError as e:\n message = \"\"\"%s does not exist. Must call stop_n_nodes before'\n calling start_stopped_nodes\"\"\"\n logger.error(message, stopped_nodes_file)\n logger.exception(e)\n return False\n\n stopped_nodes = stopped_primary_dict.get('stopped_nodes', None)\n if not stopped_nodes:\n message =\"\"\"Missing stopped_nodes element in\n stopped_nodes_file state file {}\"\"\"\n logger.error(message.format(stopped_nodes_file))\n return False\n\n for backup_primary in stopped_nodes.keys():\n succeeded = start_by_strategy(genesis_file, backup_primary,\n stopped_nodes[backup_primary],\n ssh_config_file=ssh_config_file)\n if not succeeded:\n return False\n return True",
"def startVirtualMachine(self,node,vmid):\n post_data = None\n data = self.connect('post',\"nodes/%s/qemu/%s/status/start\" % (node,vmid), post_data)\n return data",
"def start(self):\n if self._start is not None:\n raise ValueError, \"task %s already started\" % self._name\n self._start = 1\n self.run()",
"async def stop_node(request: web.Request) -> web.Response:\n req_ctx = RequestContext.parse_obj(request)\n path_params = parse_request_path_parameters_as(_NodePathParams, request)\n\n return await start_long_running_task(\n request,\n _stop_dynamic_service_with_progress,\n task_context=jsonable_encoder(req_ctx),\n path_params=path_params,\n app=request.app,\n service_uuid=f\"{path_params.node_id}\",\n fire_and_forget=True,\n )",
"def replace_stopped_node_test(self):\n self._replace_node_test(gently=False)",
"def from_stop(self, stop_number: 'str or int') -> Location:\n\n stop_number = str(stop_number)\n\n if not STOPNUM_RE.match(stop_number):\n raise InvalidStopNumber('Invalid stop number')\n\n return Location({\n 'stop': stop_number,\n '': 'Node'\n })",
"def get_start_node(self):\n return self._start",
"def to_start(self, node):\n if node in self.graph:\n if node in self.keep_index_backward:\n for pred in self.keep_index_backward[node]:\n self.to_start(pred)\n\n if node in self.graph:\n self.start.append(node)\n self.graph.remove_node(node)\n\n if node in self.keep_index_forward:\n for succ in self.keep_index_forward[node]:\n self.to_start(succ)\n self.logger.debug('%s %s\\t(to_start: %s)', self.start, self.end, node)",
"def start(name, call=None):\n datacenter_id = get_datacenter_id()\n conn = get_conn()\n node = get_node(conn, name)\n\n conn.start_server(datacenter_id=datacenter_id, server_id=node[\"id\"])\n\n return True",
"def is_start_node():\n return False",
"def start_instance(InstanceId=None):\n pass",
"def _start_oef_node(self, network_node):",
"def run():\n server = current_server()\n server._auto_stop = True\n return start()",
"def start(self):\n self.parent.start(auto_terminate=False)\n self.started = True",
"def start(self):\n self._run_start()\n self._stored.is_started = True\n if self._is_single_node and not self._stored.is_initialized:\n self._stored.is_initialized = self._stored.is_initialized = True\n self.on.cluster_initialized.emit(self._get_cluster_id())\n self.on.daemon_started.emit()",
"def start():\n\n start_server()",
"def move_start_node(self, x, y):",
"def start():",
"def start():",
"def start():"
] | [
"0.6175125",
"0.58125246",
"0.5782543",
"0.5709254",
"0.5606345",
"0.5585785",
"0.5553387",
"0.54930484",
"0.5489367",
"0.5465326",
"0.5459416",
"0.54412687",
"0.5434321",
"0.5397947",
"0.53963757",
"0.5390883",
"0.5380287",
"0.5379175",
"0.53741014",
"0.5371947",
"0.5304699",
"0.5255429",
"0.52537733",
"0.5240784",
"0.52365005",
"0.5219612",
"0.5212273",
"0.5207738",
"0.5207738",
"0.5207738"
] | 0.6000319 | 1 |
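
The ex_start_node document above simply delegates to the standard compute API. For context, a caller-side sketch of starting a stopped node by name through Apache Libcloud; the provider choice, credentials, and node name below are placeholders, not values from the record.

from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider

def start_node_by_name(driver, node_name):
    """Locate a node by name and start it via the standard API (start_node, Libcloud >= 2.7.0)."""
    node = next((n for n in driver.list_nodes() if n.name == node_name), None)
    if node is None:
        raise ValueError('no node named %r' % node_name)
    return driver.start_node(node)

# driver = get_driver(Provider.EC2)('access-key', 'secret-key')  # hypothetical credentials
# start_node_by_name(driver, 'worker-1')
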
Suspend a running node. | def ex_suspend_node(self, node):
domain = self._get_domain_for_node(node=node)
return domain.suspend() == 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def suspendVirtualMachine(self,node,vmid):\n post_data = None\n data = self.connect('post',\"nodes/%s/qemu/%s/status/suspend\" % (node,vmid), post_data)\n return data",
"def suspend(self):\n\t\treturn Job(SDK.PrlVm_Suspend(self.handle)[0])",
"def suspend(host=None,time=10):\r\n if host:\r\n host.suspend(time)",
"def suspend(self):\n self.__running = False",
"def suspend_nodes(self, poll_wait_time=5):\n _url = f\"{self.connector.base_url}/projects/{self.project_id}/nodes/suspend\"\n\n self.connector.http_call(\"post\", _url)\n\n # Update object\n time.sleep(poll_wait_time)\n self.get_nodes()",
"def SetSuspend(self, val):\n self.suspended = val\n if self.suspended:\n self.Disconnect()",
"def suspend(self, name=None):\n # UNTESTED\n server = self.cloudman.get_server(name)['id']\n r = self.cloudman.compute.suspend_server(server)\n\n return r\n\n \"\"\"\n raise NotImplementedError\n\n #\n # BUG THIS CODE DOES NOT WORK\n #\n nodes = self.list()\n for node in nodes:\n if node.name == name:\n r = self.cloudman.ex_stop_node(self._get_node(node.name),\n deallocate=False)\n # print(r)\n # BUG THIS IS NOT A DICT\n return(node, name=name)\n self.cloudman.destroy_node(node)\n\n #\n # should return the updated names dict, e.g. status and so on\n # the above specification is for one name\n #\n \n return None\n \"\"\"",
"def suspend(self, name=None):\n raise NotImplementedError",
"def suspend(self, context, instance):\n LOG.info(\"Suspending instance %s\" % instance.uuid)\n self.power_off(instance)",
"def main(nodelist):\n log.debug(f\"SuspendProgram {nodelist}\")\n nodes = util.to_hostnames(nodelist)\n\n # Filter out nodes not in config.yaml\n cloud_nodes, local_nodes = lkp.filter_nodes(nodes)\n if len(local_nodes) > 0:\n log.debug(\n f\"Ignoring slurm-gcp external nodes '{util.to_hostlist(local_nodes)}' from '{nodelist}'\"\n )\n if len(cloud_nodes) > 0:\n log.debug(\n f\"Using cloud nodes '{util.to_hostlist(cloud_nodes)}' from '{nodelist}'\"\n )\n else:\n log.debug(\"No cloud nodes to suspend\")\n return\n\n # suspend is allowed to delete exclusive nodes\n log.info(f\"suspend {nodelist}\")\n suspend_nodes(nodes)",
"def _suspend(event: E) -> None:\n event.app.suspend_to_background()",
"def suspend(vm='', env=''):\n local( main_dir + '/vagrant/bin/vm.sh suspend ' + str(vm) + ' ' + str(env) )",
"def suspend(self, instance, callback):\n pass",
"def suspend_virtual_machine(self, vm):\n try:\n self.client.suspend_vm(vm.backend_id)\n except VMwareError as e:\n raise VMwareBackendError(e)",
"def suspend_transaction():\n \n console.connect()\n time.sleep(2)\n if console._get_text(\"//*[@id='terminal_list']/div/div[4]/div/div[2]\") != \"In Transaction\":\n console.click_function_key(\"In Transaction\")\n if console.is_element_present(\"//button[starts-with(@class, 'funcButton') and contains(text(), 'Suspend Transaction')]\"):\n log.info(\"Suspending the transaction.\")\n console.click(\"Suspend Transaction\")\n console.close()",
"def power_off_node(self, node):\n msg = 'Node {0} has not become offline after hard shutdown'.format(\n node.name)\n logger.info('Power off node %s', node.name)\n node.destroy()\n logger.info('Wait a %s node offline status', node.name)\n helpers.wait(lambda: not self.fuel_web.get_nailgun_node_by_devops_node(\n node)['online'], timeout=60 * 5, timeout_msg=msg)",
"def suspend(self):\n assert (\n self._current is not None\n ), \"You can only suspend the current task if you are running the event loop.\"\n suspended = self._current\n\n def resume():\n self._tasks.append(suspended)\n\n self._current = None\n return _yield_once(), resume",
"def pause(self, instance):\n self.power_off(instance)",
"def suspend(self):\n\n LOG.info(_(\"Suspending DM device %r ...\"), self.dm_name)\n cmd = [self.dmsetup_cmd, 'suspend', self.dm_name]\n start_time = time.time()\n (ret_code, std_out, std_err) = self.call(cmd, quiet=True, sudo=True)\n if ret_code:\n raise DmSuspendError(self.dm_name, ret_code, std_err)\n\n if self.simulate:\n return\n\n self.retr_suspended()\n if not self.suspended:\n i = 0\n while i < 10:\n LOG.debug(\n _(\"DM device %r is not suspended yet, but it should so. \"\n \"Waiting a minimal time ...\"),\n self.dm_name)\n time.sleep(0.2)\n self.retr_suspended()\n if self.suspended:\n break\n i += 1\n\n if not self.suspended:\n msg = _(\n \"not suspended after %0.3f seconds, but it should so\") % (\n start_time - time.time())\n raise DmSuspendError(self.dm_name, 99, msg)\n\n LOG.debug(\n _(\"DM device %(dev)r suspended in %(sec)0.3f seconds.\") % {\n 'dev': self.dm_name, 'sec': (start_time - time.time())})",
"def resume(self, context, instance, network_info, block_device_info=None):\n LOG.info(\"Resuming instance %s\" % instance.uuid)\n self.power_on(context, instance, network_info, block_device_info)",
"def reboot(self, node):",
"def _pause(self):\n data_paused = None\n while self.target.is_active and data_paused != '01':\n data_paused = self._mem_read(self.data['paused'][0], 1)\n time.sleep(self.pause_time)\n self.data['paused'][1] = data_paused\n return",
"async def wakeup(self) -> None:\n return await self.relay(\"wakeup\")()",
"def ex_resume_node(self, node):\n domain = self._get_domain_for_node(node=node)\n return domain.resume() == 0",
"def pause(instance):\n if instance.state == STOPPED:\n return\n\n Queue.objects.add(function=\"pause\", instance=instance)",
"def suspend(name, call=None, session=None):\n if call == \"function\":\n raise SaltCloudException(\n \"The show_instnce function must be called with -a or --action.\"\n )\n if session is None:\n session = _get_session()\n log.info(\"Suspending VM %s\", name)\n vm = _get_vm(name, session)\n task = session.xenapi.Async.VM.suspend(vm)\n _run_async_task(task, session)\n return show_instance(name)",
"def kill(self):\n self.active = False\n self.wakeup()\n self.join()",
"def node_restart(ctx):\n ctx.obj['node'].attempt_restart()",
"def power(self, node_uuid, target):\n # TODO(lucasagomes): Test if target is a valid state and if it's able\n # to transition to the target state from the current one\n rpc_node = objects.Node.get_by_uuid(pecan.request.context, node_uuid)\n if rpc_node.target_power_state is not None:\n raise wsme.exc.ClientSideError(_(\"Power operation for node %s is \"\n \"already in progress.\") %\n rpc_node['uuid'],\n status_code=409)\n # Note that there is a race condition. The node state(s) could change\n # by the time the RPC call is made and the TaskManager manager gets a\n # lock.\n pecan.request.rpcapi.change_node_power_state(pecan.request.context,\n node_uuid, target)\n return NodeStates.convert(rpc_node)",
"def enter_sleep_mode(self):\n self.execute(SdpI2cCmdEnterSleepMode())"
] | [
"0.6543371",
"0.6413404",
"0.63032746",
"0.61525834",
"0.6133355",
"0.60711706",
"0.6034479",
"0.5965572",
"0.5920459",
"0.58596003",
"0.58546805",
"0.56954277",
"0.5682778",
"0.5622784",
"0.56010187",
"0.55234486",
"0.5516325",
"0.5430145",
"0.54232126",
"0.5413509",
"0.53554296",
"0.5307849",
"0.52865475",
"0.5281036",
"0.5274343",
"0.52658594",
"0.52640176",
"0.52351594",
"0.5230325",
"0.5177669"
] | 0.6554416 | 0 |
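
The ex_suspend_node document above resolves the node to a libvirt domain and calls suspend() on it. A minimal sketch of the same operation written directly against the libvirt Python bindings, assuming a connection URI and a domain UUID supplied by the caller:

import libvirt

def suspend_domain_by_uuid(uri, dom_uuid):
    """Suspend (pause) a running libvirt domain; returns True on success."""
    conn = libvirt.open(uri)  # e.g. 'qemu:///system'
    try:
        domain = conn.lookupByUUIDString(dom_uuid)
        return domain.suspend() == 0  # 0 signals success, mirroring the record's check
    finally:
        conn.close()
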
Retrieve Node object for a domain with a provided uuid. | def ex_get_node_by_uuid(self, uuid):
domain = self._get_domain_for_uuid(uuid=uuid)
node = self._to_node(domain=domain)
return node | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_node(uuid, **fields):\n try:\n with session_for_read() as session:\n res = session.query(model.Node).filter_by(\n uuid=uuid, **fields).one()\n return model.Node(uuid=res.uuid, version_id=res.version_id,\n state=res.state, started_at=res.started_at,\n finished_at=res.finished_at, error=res.error,\n manage_boot=res.manage_boot)\n except (orm_errors.NoResultFound, orm_errors.StaleDataError):\n raise utils.NodeNotFoundInDBError()",
"def _get_domain_for_uuid(self, uuid):\n domain = self.connection.lookupByUUIDString(uuid)\n return domain",
"def get_node(self, uuid, clean=True):\n if clean:\n uuid = ProcessNode.strip_uuid(uuid)\n return self._get_tree_queryset().get(uuid_full__startswith=uuid)",
"def _get_domain_for_node(self, node):\n domain = self.connection.lookupByUUIDString(node.uuid)\n return domain",
"def get_one(self, uuid):\n if self._from_chassis:\n raise exception.OperationNotPermitted\n\n rpc_node = objects.Node.get_by_uuid(pecan.request.context, uuid)\n return Node.convert_with_links(rpc_node)",
"def get_by_uuid(self, uuid):\n return self.get(uuid=uuid)",
"def read_node(\n *,\n storage: AbstractStorage = Depends(get_storage),\n node_id: str = Query(\n \"\",\n title=\"Node unique identifier\",\n description=\"The node's ID.\"\n ),\n is_user_authorized: str = Depends(authenticate)\n):\n node = storage.get_node(node_id)\n\n if not node:\n raise HTTPException(status_code=404, detail=\"Node not found\")\n\n return node",
"def get_node_uuid(self):\n if self.node is None or 'uuid' not in self.node:\n raise errors.UnknownNodeError()\n return self.node['uuid']",
"def get_node(cur, id):\n sql = \"\"\"\n SELECT\n *\n FROM\n nodes\n WHERE\n id = %s;\n \"\"\"\n if not isinstance(id, str):\n raise TypeError('ID must be type string (UUID4).')\n\n cur.execute(sql, (id, ))\n result = cur.fetchone()\n\n if result is None:\n raise exceptions.NodeNotFound(id)\n else:\n return NodeData(**result)",
"def node_show(self, node):\n if node.instance_uuid:\n n = self.ironic_client.node.get_by_instance_uuid(\n node.instance_uuid)\n else:\n n = self.ironic_client.node.get(node.uuid)\n return n",
"def get_node(self, _id):\n return self.make_request(\"GET\", \"nodes/\"+_id, {})",
"def get_node(port, identity: dht.Identity=None, is_bootstrap=False, ipv4:str=\"\", \n ipv6:str=\"\", config:dht.DhtConfig=None, bootstrap_node=None,\n logfile=None):\n log.debug(f\"get_node called for port {port}\")\n global nodes\n if port not in nodes or not nodes[port].isRunning():\n n = dht.DhtRunner()\n if config is not None:\n n.run(id=identity, is_bootstrap=is_bootstrap, port=port, ipv4=ipv4, ipv6=ipv6, config=config)\n else:\n n.run(id=identity, is_bootstrap=is_bootstrap, port=port, ipv4=ipv4, ipv6=ipv6)\n nodes[port] = n\n\n if logfile:\n n.enableFileLogging(logfile)\n\n if bootstrap_node is not None:\n bhost, bport = bootstrap_node.split(':')\n n.bootstrap(bhost, bport)\n\n return nodes[port]",
"def get_article(uuid):\n return Article.get(Article.uuid == uuid)",
"def get_composed_node_by_uuid(cls, composed_node_uuid):\n return cls.dbdriver.get_composed_node_by_uuid(composed_node_uuid)",
"def ex_get_node_by_name(self, name):\n domain = self._get_domain_for_name(name=name)\n node = self._to_node(domain=domain)\n return node",
"def get_node(self, node_id) -> Node:\n return self._node_serializer.from_data(graph=self, **self._collection.get_record(node_id))",
"def by_uuid(cls, uuid):\n return dbsession.query(cls).filter_by(uuid=uuid).first()",
"def get_node_by_id(self, node_name: Hashable):\n return self._meta.get_node(node_name)",
"def get_by_uuid(self, uuid):\n\n result_filter = [r for r in self.content if r.uuid() == uuid]\n\n try:\n return result_filter[0]\n except IndexError:\n return None",
"def get_node(self, project_id, node_id):\n _url = f\"{self.base_url}/projects/{project_id}/nodes/{node_id}\"\n return self.http_call(\"get\", _url).json()",
"def find(cls, uuid):\n entries = cls.objects.filter(uuid=uuid)\n if not entries:\n return None\n else:\n return entries.first()",
"def get_visitor(uuid):\n log.debug('Getting visitor by uuid(%s)', uuid)\n try:\n visitor = models.Visitor.objects.get(uuid=uuid)\n except Exception:\n visitor = models.Visitor()\n visitor.uuid = uuid\n visitor.save()\n return visitor",
"def get_node(deployment_id, node_id):\n nodes = get_storage_manager().list(\n Node,\n filters={'deployment_id': deployment_id, 'id': node_id}\n )\n if not nodes:\n raise NotFoundError(\n 'Requested Node with ID `{0}` on Deployment `{1}` '\n 'was not found'.format(node_id, deployment_id)\n )\n return nodes[0]",
"def get(self, uuid):\n\n\t\treturn self._get(\"/tag/%s\" % base.getid(uuid), \"tag\")",
"def by_uuid(cls, _uuid):\n return dbsession.query(cls).filter_by(uuid=_uuid).first()",
"def by_uuid(cls, _uuid):\n return dbsession.query(cls).filter_by(uuid=_uuid).first()",
"async def retrieve_node(request: web.Request) -> web.Response:\n path_params = parse_request_path_parameters_as(_NodePathParams, request)\n\n try:\n data = await request.json()\n port_keys = data.get(\"port_keys\", [])\n except json.JSONDecodeError as exc:\n raise web.HTTPBadRequest(reason=f\"Invalid request body: {exc}\") from exc\n\n return web.json_response(\n await director_v2_api.retrieve(\n request.app, f\"{path_params.node_id}\", port_keys\n ),\n dumps=json_dumps,\n )",
"def info(self, node_uuid):\n if node_uuid is None:\n return None\n uri = '{}/{}'.format(self.store.aroot, node_uuid)\n infos = self.store.actual.resolve(uri)\n if infos is None:\n return None\n return json.loads(infos)",
"def node_by_id(self, identifier):\n for node in self.nodes:\n if node.identifier == identifier:\n return node\n raise Exception(\"Node '{0}' not available in {1}\".format(\n identifier, self.name))",
"def get_node_by_id(self, node_id):\n try:\n return self._nodes[node_id]\n except KeyError:\n return None"
] | [
"0.73299086",
"0.70159495",
"0.70025486",
"0.6854524",
"0.6605886",
"0.5908822",
"0.58769524",
"0.58602464",
"0.5852138",
"0.57863677",
"0.5778645",
"0.5746345",
"0.573596",
"0.57004017",
"0.56440103",
"0.56256723",
"0.5552938",
"0.5542634",
"0.5524711",
"0.5483632",
"0.54819095",
"0.5447594",
"0.5446559",
"0.5413213",
"0.5346966",
"0.5346966",
"0.5322931",
"0.53140634",
"0.527704",
"0.525663"
] | 0.85098386 | 0 |
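The record that ends just above pairs the docstring with `ex_get_node_by_uuid`, which resolves a libvirt domain by UUID and wraps it in a driver `Node`. Below is a minimal usage sketch; the `Provider.LIBVIRT` wiring, the connection URI and the UUID value are illustrative assumptions, not something the record itself specifies.

```python
# Hypothetical usage of the driver method shown above (assumes apache-libcloud's
# libvirt backend and a locally reachable qemu:///system hypervisor).
from libcloud.compute.providers import get_driver
from libcloud.compute.types import Provider

cls = get_driver(Provider.LIBVIRT)
driver = cls(uri="qemu:///system")

node = driver.ex_get_node_by_uuid("4dea22b3-1d52-d8f3-2516-782e98a23fa0")
print(node.name, node.state)
```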
Retrieve Node object for a domain with the provided name. | def ex_get_node_by_name(self, name):
domain = self._get_domain_for_name(name=name)
node = self._to_node(domain=domain)
return node | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_domain_for_name(self, name):\n domain = self.connection.lookupByName(name)\n return domain",
"def get_node(self, name):\n\n assert name in self.nodes\n return self.nodes[name]",
"def get_node_by_name(self, name):\n\n for node in self.nodes:\n if node.name == name:\n return node\n\n return None",
"def _get_domain(self, name=None, domain_id=None):\n try:\n if name != None:\n domain = self.conn.lookupByName(name)\n elif domain_id != None:\n domain = self.conn.lookupByNamtoprettyxmle(domain_id)\n \n self.logger.debug('Get libvirt domain: %s' % name)\n return domain\n except libvirt.libvirtError, ex:\n self.logger.error(ex)\n raise VirtDomainMonitorError(ex)",
"def get_node(self, name):\n return self._node_reg[name]",
"def get_node(self, name):\n if name in self._nodes:\n return self._nodes[name]\n return None",
"def get_node_by_id(self, node_name: Hashable):\n return self._meta.get_node(node_name)",
"def get_node(self, name, create=False):\n\n try:\n node = self.find_node(name, create)\n except KeyError:\n node = None\n return node",
"def get_node(self, name):\n return self.source_net.nodes[name]",
"def get_node_with_name(self, name):\n\t return self.variables[name]",
"def findNode(self, name):\n return self._nodes.get(name)",
"def get_node(conn, name):\n datacenter_id = get_datacenter_id()\n\n for item in conn.list_servers(datacenter_id)[\"items\"]:\n if item[\"properties\"][\"name\"] == name:\n node = {\"id\": item[\"id\"]}\n node.update(item[\"properties\"])\n return node",
"def get_node(self, name=None, node_id=None):\n if node_id:\n return self._search_node(key=\"node_id\", value=node_id)\n elif name:\n return self._search_node(key=\"name\", value=name)\n else:\n raise ValueError(\"name or node_ide must be provided\")",
"def node(self, name):\r\n return self.nodes[name]",
"def _get_domain_for_node(self, node):\n domain = self.connection.lookupByUUIDString(node.uuid)\n return domain",
"def get_node_by_name(self, name):\r\n root = self.get_xml_root()\r\n return root.find(name)",
"def lookup(self, domain_name, validate=True):\n try:\n domain = self.get_domain(domain_name, validate)\n except:\n domain = None\n return domain",
"def lookup(self, domain_name, validate=True):\r\n try:\r\n domain = self.get_domain(domain_name, validate)\r\n except:\r\n domain = None\r\n return domain",
"def get_node(self, node_name):\n # (str) -> dict\n # TODO: Return\n return self.name_to_nodes[node_name]",
"def get_node(self, name):\n response = self.connection.request(action=\"/v1/containers/%s\" % name, method=\"GET\")\n container = response.parse_body()\n \n try:\n state = self.NODE_STATE_MAP[container['state']]\n except KeyError:\n state = NodeState.UNKNOWN\n \n return Node( id=container['name'],\n name=container['name'],\n state=state,\n public_ips=container['ips'],\n private_ips=[],\n driver=self,\n image=self.list_images()[0])",
"def nodeFromName(self, name):\n for item in self.items():\n if isinstance(item, NodeItem):\n if item.name() == name:\n return item\n return None",
"def __getattr__(self, name):\n if name in self.domains:\n return self.domains[name]\n\n raise AttributeError('No domain named %s found.' % name)",
"def read_node(name: str = '', value: str = '') -> Node:\n first_node = read_all_nodes(name=name, value=value).first()\n return first_node",
"def find_node(self, name, create=False):\n\n name = self._validate_name(name)\n node = self.nodes.get(name)\n if node is None:\n if not create:\n raise KeyError\n node = self.node_factory()\n self.nodes[name] = node\n return node",
"def get_create_named_node(self, node_id_name):\n n = node_id_name.split(\"_\", 1)\n node_id = int(n[0], 16)\n if node_id in self.nodes_dict:\n node = self.nodes_dict[node_id]\n else:\n node = self.get_create_node(node_id)\n\n if len(n) == 2 and node.node_name != n[1]:\n node.node_name = n[1]\n\n return node",
"def name_to_node(name):\n selectionList = MSelectionList()\n selectionList.add(name)\n node = MObject()\n selectionList.getDependNode(0, node)\n return node",
"def createNode(self, name):\n return Node(name)",
"def get(cls, subdomain, name):\n return cls.get_by_key_name(subdomain + ':' + name)",
"def find_domain(self):\n for network in self.network_set.all():\n if network.site:\n expected_name = \"{0}.{1}.mozilla.com\".format(self.name,\n network.site.get_site_path())\n try:\n domain = Domain.objects.get(name=expected_name)\n except ObjectDoesNotExist, e:\n continue\n return domain.name\n\n return None",
"def get(domain_name=None):\n url = 'https://api.cloudns.net/dns/soa-details.json'\n\n params = Parameters({'domain-name': domain_name})\n\n return requests.get(url, params=params.to_dict())"
] | [
"0.7113652",
"0.6740635",
"0.6728727",
"0.6652932",
"0.6599187",
"0.65716195",
"0.6494387",
"0.6490278",
"0.6489191",
"0.6455513",
"0.6446057",
"0.64404565",
"0.64146894",
"0.6349402",
"0.6293593",
"0.62350583",
"0.62033457",
"0.61973226",
"0.6163783",
"0.6153157",
"0.6134103",
"0.6039822",
"0.60272914",
"0.5953718",
"0.59506005",
"0.58854717",
"0.58680093",
"0.58653575",
"0.5855463",
"0.5835195"
] | 0.841673 | 0 |
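`ex_get_node_by_name` above is a thin wrapper over libvirt's name lookup (`lookupByName`, visible among the negatives). For reference, a standalone sketch with the libvirt Python bindings; the URI and guest name are placeholders.

```python
# Direct equivalent with libvirt-python: look a guest up by name and read a
# few basic properties. libvirt.open() raises libvirt.libvirtError on failure.
import libvirt

conn = libvirt.open("qemu:///system")
domain = conn.lookupByName("my-guest")          # placeholder guest name
print(domain.UUIDString(), domain.isActive())   # UUID and running state
conn.close()
```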
Return the system hostname on which the hypervisor is running. | def ex_get_hypervisor_hostname(self):
hostname = self.connection.getHostname()
return hostname | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def hostname():\n return socket.gethostname()",
"def get_hostname():\n\thostname = socket.gethostname()\n\n\treturn hostname",
"def hostname(self) -> str:\n _args: list[Arg] = []\n _ctx = self._select(\"hostname\", _args)\n return _ctx.execute_sync(str)",
"def get_hostname():\n host = os.getenv(\"OPSIM_HOSTNAME\")\n if host is None or host == \"\":\n import socket\n host = socket.gethostname()\n host = host.split('.')[0]\n return host",
"def hostname():\n hostname = socket.gethostname()\n if '.' in hostname:\n hostname = hostname.split('.')[0]\n return hostname",
"def get_host_name(self):\n return self.get_command_output(\"hostname\").strip(\"\\n\")",
"def get_hostname(self):\n return self.name",
"def get_host_name():\n return socket.gethostname()",
"def get_hostname():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/hostname\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def gethostname():\n if socket.gethostname().find('.') >= 0:\n host = socket.gethostname()\n else:\n host = socket.gethostbyaddr(socket.gethostname())[0]\n return host",
"def get_hostname(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetHostname', self.handle)",
"def get_host_name(self):\n\t\treturn call_sdk_function('PrlSrvInfo_GetHostName', self.handle)",
"def get_hostname():\n consoleOutput = exec_console_command(\"hostname\")\n\n return consoleOutput",
"def get_hostname() -> str:\n if config.config is not None and \"hostname\" in config.config.get(\"base\", dict()):\n return config.config[\"base\"][\"hostname\"]\n\n return socket.gethostname().split(\".\")[0]",
"def get_hostname(self):\n raise NotImplementedError('get_hostname')",
"def hostname(self) -> Optional[str]:\n return pulumi.get(self, \"hostname\")",
"def hostname(self):\n return self._hostname",
"def get_hostname(self):\n\t\treturn call_sdk_function('PrlSrvCfg_GetHostname', self.handle)",
"def get_hostname():\n return re.split(\"\\.\", env.host)[0]",
"async def get_hostname(self):\n ngc = await self.middleware.call('network.configuration.config')\n if 'hostname_virtual' in ngc:\n failover_status = await self.middleware.call('failover.status')\n if failover_status == 'MASTER':\n return ngc['hostname_virtual']\n elif failover_status == 'BACKUP':\n return None\n else:\n return ngc['hostname_local']",
"def hostname(self):\n return 'localhost'",
"def GetServerHost():\n return GetHostName(True)",
"def get_hostname(self):\n return self.mycam.devicemgmt.GetHostname()",
"def host_name(self):\n return self._host_name",
"def get_hostname():\n global HOST\n if '.' in HOST:\n HOST = HOST.split('.')[0]\n return HOST",
"def Hostname(self):\n return self._get_attribute('hostname')",
"async def get_hostname(self):\n\n # Display info message\n log.info(\"get_hostname\")\n\n # Get hostname\n output = await self.send_command(self.cmd_get_hostname)\n\n # Display info message\n log.info(f\"get_hostname: output: '{output}'\")\n\n # Remove the useless information in the returned string\n output = output.split(\"System Name: \")[1].strip()\n\n # Display info message\n log.info(f\"get_hostname: hostname found: '{output}'\")\n\n # Return the name of the device\n return output",
"def get_host(self) -> str:\n return self.socket.getsockname()[0]",
"def get_hostname(self):\n # We set a default in install.py in case it isn't preseeded but when we\n # preseed, we are looking for None anyhow.\n return ''",
"def hostname(self,hostname=None):\n return self.ssh_host(hostname)"
] | [
"0.8208617",
"0.8037137",
"0.8012449",
"0.79964304",
"0.7980484",
"0.79791266",
"0.79482585",
"0.7933461",
"0.7856743",
"0.7811493",
"0.77951044",
"0.77097285",
"0.76754904",
"0.7673008",
"0.7661361",
"0.7646124",
"0.7639138",
"0.76167923",
"0.7588144",
"0.75737196",
"0.75471085",
"0.7521853",
"0.75140756",
"0.7479457",
"0.7450926",
"0.74483824",
"0.7441556",
"0.74290395",
"0.7411516",
"0.74044615"
] | 0.87602705 | 0 |
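`ex_get_hypervisor_hostname` simply forwards to `getHostname()` on the libvirt connection, which reports the host running libvirtd rather than the client machine. A short sketch with the Python bindings, connection URI assumed:

```python
import libvirt

conn = libvirt.open("qemu:///system")
print(conn.getHostname())  # hostname of the machine running libvirtd
conn.close()
```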
Retrieve hypervisor system information. | def ex_get_hypervisor_sysinfo(self):
xml = self.connection.getSysinfo()
etree = ET.XML(xml)
attributes = ["bios", "system", "processor", "memory_device"]
sysinfo = {}
for attribute in attributes:
element = etree.find(attribute)
entries = self._get_entries(element=element)
sysinfo[attribute] = entries
return sysinfo | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_hypervisor_info(self):\n try:\n req = Request(self.compute_url +\n \"/os-hypervisors/detail\" )\n self._upgrade_to_authenticated_request(req)\n resp = urlopen(req)\n content = resp.read().decode('utf-8')\n encoded = json.loads(content)\n resp.close()\n except URLError as e:\n return {}\n except Exception as e:\n raise Exception(\"Unable to process compute reponse: %s\" % e)\n\n return encoded['hypervisors']",
"def _get_host_details(self):\n # Assuming only one system present as part of collection,\n # as we are dealing with iLO's here.\n status, headers, system = self._rest_get('/rest/v1/Systems/1')\n if status < 300:\n stype = self._get_type(system)\n if stype not in ['ComputerSystem.0', 'ComputerSystem.1']:\n msg = \"%s is not a valid system type \" % stype\n raise exception.IloError(msg)\n else:\n msg = self._get_extended_error(system)\n raise exception.IloError(msg)\n\n return system",
"def get_system_info():\n query = {\"type\": \"op\", \"cmd\": \"<show><system><info></info></system></show>\"}\n\n return __proxy__[\"panos.call\"](query)",
"def get_system_information(self):\n\t\tsys = platform.uname()\n\t\treturn {\n\t\t\t'hostname': sys.node,\n\t\t\t'operating_system': sys.system,\n\t\t\t'version': sys.version,\n\t\t\t'release': sys.release,\n\t\t\t'processor' : sys.processor,\n\t\t\t'processor_type': sys.machine,\n\t\t}",
"async def get_system_info(self) -> Dict[str, Any]:\n assert self._client is not None\n return await self._client.invoke_method(\"system.info\")",
"def _get_system_hardware(self):\n return self._get_system_status()[\"hardware\"]",
"async def sysinfo(self, ctx: Context):\n\t\tstart = time.perf_counter()\n\t\tend = time.perf_counter()\n\t\tduration = (end - start) * 1000\n\t\tcpuavg = psutil.cpu_percent(interval=None)\n\t\tmem = psutil.virtual_memory()[2]\n\t\tdurround = round(duration, 3)\n\t\tosun = os.uname()\n\t\tawait self.send(f\"System Info | CPU: {cpuavg}% | RAM: {mem}% | Latency: {durround * 1000}ms | OS: {sys.platform}\", whisper=[ctx.author.id])",
"def get_system_info(self):\r\n method = self.public_endpoints['system_info']['method']\r\n url = self.base_url + self.public_endpoints['system_info']['url']\r\n req = requests.request(method, url)\r\n res = req.json()\r\n\r\n if res['success'] == True:\r\n return res[\"result\"]\r\n else:\r\n return res",
"def system_info(self, system_id):\n\n\t\tpath = f'{self.BIKE_ENDPOINT}system/{system_id}/{self.secret_key}'\n\t\tresponse = requests.get(path).json()\n\t\tself.check_api_key(response)\n\n\t\treturn response",
"def compute_hypervisors_statistics(self):\n path = '/os-hypervisors/statistics'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack hypervisors statistics: %s' % truncate(res))\n return res[0]['hypervisor_statistics']",
"def sys_info(self):\n\n for i in self._nodes.items():\n print(\"\\n==============================\")\n name = i[0]\n node = i[1]\n\n print(\"NODE: {}\\n\".format(name))\n\n # CPU\n print(\"CPU:\")\n self.cpu_info(node)\n\n # Grub\n print(\"\\nGrub Command Line:\")\n if \"grub\" in node:\n print(\" Current: {}\".format(node[\"grub\"][\"current_cmdline\"]))\n print(\" Configured: {}\".format(node[\"grub\"][\"default_cmdline\"]))\n\n # Huge Pages\n print(\"\\nHuge Pages:\")\n self.hugepage_info(node)\n\n # Devices\n print(\"\\nDevices:\")\n self.device_info(node)\n\n # Status\n print(\"\\nVPP Service Status:\")\n state, errors = VPPUtil.status(node)\n print(\" {}\".format(state))\n for e in errors:\n print(\" {}\".format(e))\n\n # Minimum system resources\n self.min_system_resources(node)\n\n print(\"\\n==============================\")",
"def getSysinfo(self, request):\r\n return self._ref.callRemote('getSysinfo')",
"def remote_getSysinfo(self, request):\r\n # TODO : replace these calls with call to rce.util.sysinfo\r\n response_table = {\r\n 'size':self._size,\r\n 'cpu':self._cpu,\r\n 'memory': self._memeory,\r\n 'bandwidth': self._bandwidth,\r\n # 'keyword': some value or function to provide the data\r\n }\r\n\r\n return response_table[request]",
"def get_software_info():\n query = {\n \"type\": \"op\",\n \"cmd\": \"<request><system><software><info></info></software></system></request>\",\n }\n\n return __proxy__[\"panos.call\"](query)",
"def compute_hypervisors(self):\n path = '/os-hypervisors/detail'\n res = self.compute.call(path, 'GET', data='', \n token=self.manager.identity.token)\n self.logger.debug('Get openstack hypervisors: %s' % truncate(res))\n return res[0]['hypervisors']",
"def get_local_hypervisor(self):\n # Look up hypervisors available filtered by my hostname\n host = self.get_my_hostname()\n hyp = self.get_all_hypervisor_ids(filter_by_host=host)\n if hyp:\n return hyp[0]",
"def system(self):\n return self['system']",
"def get_supervisor_info(hass: HomeAssistant) -> dict[str, Any] | None:\n return hass.data.get(DATA_SUPERVISOR_INFO)",
"def get_version_info(self):\n sys_info_service = self.robot.all_services.get(\"sys_info\")\n if sys_info_service is not None:\n log.info(\"System version info: %s\" % sys_info_service.system_version)\n else:\n log.warning(\"Service get_version_info is not enabled!\")",
"def GetNodeInfo(self, hvparams=None):\n return self.GetLinuxNodeInfo()",
"def _get_system_status(self):\n sysinfo_strings = self._command(self.commands[\"SYSTEM_STATUS\"])\n sysinfo_dict = {\"name\": sysinfo_strings[0]}\n for line in sysinfo_strings:\n if \":\" in line:\n key, value = line.split(\":\", 1)\n sysinfo_dict[key.lower()] = value.strip()\n\n return sysinfo_dict",
"def get_system_info() -> SystemInfo:\n\n assert is_windows(), 'This function is only available on Windows systems'\n\n from win32api import GetSystemInfo\n return SystemInfo(*GetSystemInfo())",
"def describe_operating_systems():\n pass",
"def system_status(system_ip):\n\n click.secho(\"\\nRetrieving the System Status\")\n\n url = base_url + \"/device/system/status?deviceId={0}\".format(system_ip)\n\n response = requests.get(url=url, headers=header,verify=False)\n if response.status_code == 200:\n items = response.json()['data']\n else:\n print(\"Failed to get system status \" + str(response.text))\n exit()\n\n print(\"\\nSystem status for Device:\",system_ip)\n\n headers = [\"Host name\", \"Up time\", \"Version\", \"Memory Used\", \"CPU system\"]\n table = list()\n\n for item in items:\n tr = [item['vdevice-host-name'], item['uptime'], item['version'], item['mem_used'], item['cpu_system']]\n table.append(tr)\n\n try:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"fancy_grid\"))\n except UnicodeEncodeError:\n click.echo(tabulate.tabulate(table, headers, tablefmt=\"grid\"))",
"def computer_info():\n return {\n 'system': platform.system(),\n 'architecture': platform.architecture(),\n 'name': platform.node(),\n 'release': platform.release(),\n 'version': platform.version(),\n 'machine': platform.machine(),\n 'processor': platform.processor(),\n 'virtual CPUs': mproc.cpu_count(),\n 'total RAM': _get_ram(),\n }",
"def remote_info():\n run('uname -a')",
"def system_info() -> str:\n return \"\\n\".join(\n [\n f\"Python version: {platform.python_version()}\",\n f\"Python implementation: {platform.python_implementation()}\",\n f\"Python compiler: {platform.python_compiler()}\",\n f\"PyTorch version: {torch.__version__}\",\n f\"System: {platform.system() or 'Unable to determine'}\",\n f\"System version: {platform.release() or 'Unable to determine'}\",\n f\"Processor: {platform.processor() or 'Unable to determine'}\",\n f\"Number of CPUs: {multiprocessing.cpu_count()}\",\n ]\n )",
"async def get_system(self) -> dict[str, Any]:\n cmd = await self.send_command(\"SYSTEM\", timeout=1)\n if not cmd.succeeded():\n raise ArchonError(f\"Command finished with status {cmd.status.name!r}\")\n\n keywords = str(cmd.replies[0].reply).split()\n system = {}\n for (key, value) in map(lambda k: k.split(\"=\"), keywords):\n system[key.lower()] = value\n if match := re.match(r\"^MOD([0-9]{1,2})_TYPE\", key, re.IGNORECASE):\n name_key = f\"mod{match.groups()[0]}_name\"\n system[name_key] = ModType(int(value)).name\n\n return system",
"def get_system_description(\n submitter: str,\n division: str,\n status: str,\n system_name: Optional[str] = None,\n host_processors_per_node: Optional[int] = None,\n) -> Dict[str, str]:\n is_cuda = torch.cuda.is_available()\n cpu_info = cpuinfo.get_cpu_info()\n\n system_desc = {\n 'submitter': submitter,\n 'division': division,\n 'status': status,\n 'number_of_nodes': dist.get_world_size() / dist.get_local_world_size(),\n 'host_processors_per_node': str(host_processors_per_node) if host_processors_per_node else '',\n 'host_processor_model_name': str(cpu_info.get('brand_raw', 'CPU')),\n 'host_processor_core_count': str(psutil.cpu_count(logical=False)),\n 'host_processor_vcpu_count': '',\n 'host_processor_frequency': '',\n 'host_processor_caches': '',\n 'host_processor_interconnect': '',\n 'host_memory_capacity': '',\n 'host_storage_type': '',\n 'host_storage_capacity': '',\n 'host_networking': '',\n 'host_networking_topology': '',\n 'host_memory_configuration': '',\n 'accelerators_per_node': str(dist.get_local_world_size()) if is_cuda else '0',\n 'accelerator_model_name': str(torch.cuda.get_device_name(None)) if is_cuda else '',\n 'accelerator_host_interconnect': '',\n 'accelerator_frequency': '',\n 'accelerator_on-chip_memories': '',\n 'accelerator_memory_configuration': '',\n 'accelerator_memory_capacity': '',\n 'accelerator_interconnect': '',\n 'accelerator_interconnect_topology': '',\n 'cooling': '',\n 'hw_notes': '',\n 'framework':\n f'PyTorch v{torch.__version__} and MosaicML composer v{composer.__version__}', # type: ignore (third-party missing stub)\n 'other_software_stack': {\n 'cuda_version': torch.version.cuda if is_cuda else '', # type: ignore (third-party missing stub)\n 'composer_version': composer.__version__,\n 'python_version': sys.version,\n },\n 'operating_system': f'{platform.system()} {platform.release()}',\n 'sw_notes': '',\n }\n\n if system_desc['number_of_nodes'] != 1:\n warnings.warn('Number of nodes > 1 not tested, proceed with caution.')\n\n if system_name is None:\n world_size = dist.get_world_size()\n if is_cuda:\n device_name = system_desc['accelerator_model_name']\n else:\n device_name = system_desc['host_processor_model_name']\n\n device_name = device_name.replace(' ', '_')\n system_name = f'{world_size}x{device_name}_composer'\n\n # default to system name as \"[world_size]x[device_name]\"\n # e.g. 8xNVIDIA_A100_80GB\n system_desc['system_name'] = system_name\n return system_desc",
"def Hypervisor(self):\n from ixnetwork_restpy.testplatform.sessions.ixnetwork.availablehardware.virtualchassis.hypervisor.hypervisor import Hypervisor\n return Hypervisor(self)"
] | [
"0.7540559",
"0.72924674",
"0.7231328",
"0.72079253",
"0.7133061",
"0.6733231",
"0.6650252",
"0.6599447",
"0.6597166",
"0.6572756",
"0.6566554",
"0.65332323",
"0.6496247",
"0.6453271",
"0.640171",
"0.6376914",
"0.6345654",
"0.6250193",
"0.6224385",
"0.62230563",
"0.61986095",
"0.6154817",
"0.60985935",
"0.60770655",
"0.6076943",
"0.5995173",
"0.597038",
"0.59685403",
"0.59587914",
"0.59379834"
] | 0.799808 | 0 |
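`ex_get_hypervisor_sysinfo` fetches the SMBIOS-style sysinfo XML and flattens selected sections through a private `_get_entries` helper that the record does not show. The sketch below reproduces the same idea in a self-contained way; the `<entry name="...">` layout of the sysinfo XML and the explicit `flags=0` argument are assumptions based on typical libvirt output.

```python
# Standalone sketch: collect <entry name="..."> values per sysinfo section.
import libvirt
from xml.etree import ElementTree as ET

conn = libvirt.open("qemu:///system")
root = ET.fromstring(conn.getSysinfo(0))  # flags=0
sysinfo = {}
for section in ("bios", "system", "processor", "memory_device"):
    elem = root.find(section)  # note: find() returns only the first match
    sysinfo[section] = {} if elem is None else {
        entry.get("name"): (entry.text or "").strip()
        for entry in elem.findall("entry")
    }
print(sysinfo["system"])
conn.close()
```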
Retrieve IP addresses for the provided domain. | def _get_ip_addresses_for_domain(self, domain):
result = []
if platform.system() != "Linux":
# Only Linux is supported atm
return result
if "///" not in self._uri:
# Only local libvirtd is supported atm
return result
mac_addresses = self._get_mac_addresses_for_domain(domain=domain)
arp_table = {}
try:
cmd = ["arp", "-an"]
child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, _ = child.communicate()
arp_table = self._parse_ip_table_arp(arp_output=stdout)
except OSError as e:
if e.errno == 2:
cmd = ["ip", "neigh"]
child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
stdout, _ = child.communicate()
arp_table = self._parse_ip_table_neigh(ip_output=stdout)
for mac_address in mac_addresses:
if mac_address in arp_table:
ip_addresses = arp_table[mac_address]
result.extend(ip_addresses)
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getIPs(self, domain = \"localhost\"):\n # convert 'domain' to string, in case of erroneous type being passed\n domain = str(domain)\n\n # Kind warning for those who entered an IP address instead of a domain\n try: \n inet_aton(domain)\n print(\"Warning: an IP address was given instead of a domain name.\")\n except:\n pass\n\n # Try to query DNS records to populate A-Record IP list\n # Prints errors and returns None if exceptions found\n try:\n iplist = gethost(domain)[2]\n except gaierror as ge:\n if ge.errno == -2:\n print(\"Error: Domain '{}' invalid, or unknown. \"\\\n \"Please check proper spelling and format.\\n\"\\\n \"(e.g.: python dns_get_A_record_IPs.py google.com )\".format(domain))\n elif ge.errno == -3:\n print(\"Error: Domain '{}' unreachable. Please check your connection.\".format(domain))\n return None\n except timeout:\n print(\"Error: Connection to {} timed out.\".format(domain))\n return None\n\n return iplist",
"def __resolve_domain(self, domain=''):\n _ip = []\n if self.__is_ip_address(domain):\n # print hostname + \" is IP address\"\n _ip.append(domain)\n return _ip\n r = dns.resolver.get_default_resolver()\n r.nameservers = ['8.8.8.8']\n #answers = dns.resolver.query(hostname, 'A')\n try:\n answers = r.query(domain, 'A')\n for rdata in answers:\n # print rdata.address\n _ip.append(rdata.address)\n except dns.resolver.NoAnswer:\n print \"no answer\"\n\n if domain.find(\"www.\") != 0:\n domain = \"www.\" + domain\n # print \"querying \" + hostname\n try:\n answers = dns.resolver.query(domain, 'A')\n for rdata in answers:\n # print rdata.address\n _ip.append(rdata.address)\n except dns.resolver.NoAnswer:\n print \"no answer\"\n # print(\"processed %s, it has %d ips.\" % (hostname, len(_ip)))\n\n return list(set(_ip))",
"def resolve_ip_from_domain(logger, domain):\n logger.debug(f\"Obtain the ip address from domain: {domain}\")\n ip = ''\n try:\n ip = socket.gethostbyname(name)\n except Exception as e:\n logger.warning(f\"Error resolving domain to ip: {ip}. \"\n f\"Error: {e.__class__}, {e}\")\n return ip",
"def _get_IP_addresses(hostname):\n try:\n answers, auth, addit = yield DNSclient.lookupAddress(hostname)\n except Exception as exc: # Too many different DNS failures to catch...\n log.exception('DNS Resolution failure: %r for name: %r', exc, hostname)\n returnValue([])\n\n returnValue(\n [answer.payload.dottedQuad()\n for answer in answers if answer.type == dns.A])",
"def list(self, domain):\n return request(\n API_LIST.DNS_LIST.value,\n {\n 'email': self.email,\n 'token': self.token,\n 'domain': domain\n }\n )",
"def _get_ip_record(domain, domain_validator, ti_prov):\n ip_record = None\n if domain_validator.is_resolvable(domain) is True:\n try:\n answer = dns.resolver.query(domain, \"A\")\n except dns.resolver.NXDOMAIN:\n md(\"Could not resolve IP addresses from domain.\")\n resolved_domain_ip = answer[0].to_text()\n ip_whois_result = whois(resolved_domain_ip)\n ip_record = pd.DataFrame(\n {\n \"IP Address\": [resolved_domain_ip],\n \"Domain\": [ip_whois_result.get(\"domain_name\", None)],\n \"Registrar\": [ip_whois_result.get(\"asn_description\", None)],\n \"Country\": [ip_whois_result.get(\"country\", None)],\n \"Creation Date\": [ip_whois_result.get(\"creation_date\", None)],\n }\n )\n if isinstance(ip_record, pd.DataFrame) and not ip_record.empty:\n ip_record = _process_tor_ip_record(ip_record, ti_prov)\n ip_record = _process_previous_resolutions(ip_record, ti_prov)\n return ip_record",
"def get_ipaddresses(auth):\n url_ipaddresses = \"http://\" + auth.ipaddr + \"/rest/\"+auth.version+\"/ipaddresses\"\n try:\n r = requests.get(url_ipaddresses, headers = auth.cookie)\n ipaddresses = json.loads(r.text)['ip_address_subnet_element']\n return ipaddresses\n except requests.exceptions.RequestException as error:\n return \"Error:\\n\" + str(error) + \" get_ipaddresses: An Error has occured\"",
"def resolv(hostname):\n\n ips = list()\n\n # Create resolver object\n res = resolver.Resolver()\n\n # Choose the correct DNS servers\n # Blue DNS servers\n if hostname.startswith('b-'):\n res.nameservers = ['172.16.2.10', '172.16.2.11']\n # Green DNS servers\n elif hostname.startswith('g-'):\n res.nameservers = ['10.0.2.10', '10.0.2.11']\n # Default to white DNS servers\n else:\n res.nameservers = ['194.47.252.134', '194.47.252.135']\n\n # Query\n try:\n query = res.query(hostname)\n for answer in query:\n ips.append(answer.address)\n except resolver.NXDOMAIN:\n raise CouldNotResolv\n\n # Return query result\n return ips",
"def get_domain_dns_records(domain):\n url_suffix = \"v1/domains/{}/records\".format(domain)\n ret = _call_endpoint(url_suffix)\n if isinstance(ret, dict) and ret.get('code', None) == \"UNKNOWN_DOMAIN\":\n # e.g. {'code': 'UNKNOWN_DOMAIN', 'message': 'The given domain is not registered, or does not have a zone file'}\n raise Exception(f\"Can't find domain {domain}. Are you sure your API key and secret are correct?: {ret}\")\n return ret",
"def get_ip(self):\n json_scheme = self.gen_def_json_scheme('GetPurchasedIpAddresses')\n json_obj = self.call_method_post(method='GetPurchasedIpAddresses ', json_scheme=json_scheme)\n self.iplist = IpList()\n for ip in json_obj['Value']:\n r = Ip()\n r.ip_addr = ip['Value']\n r.resid = ip['ResourceId']\n r.serverid = ip['ServerId'] if 'None' not in str(ip['ServerId']) else None\n self.iplist.append(r)",
"def fetch_domain_certs(domain):\n url = BASE_URL.format(domain)\n result = requests.get(url)\n if result.status_code != 200:\n result.raise_for_status()\n return result.json()",
"def getipaddrs(hostname):\n result = socket.getaddrinfo(hostname,None,0,socket.SOCK_STREAM)\n return [x[4][0] for x in result]",
"def getBaseIP(url: str) -> list:\n \n response = requests.get(url) #get data \n\n ip_sets = response.text\n ip_list = re.findall(r'(?:\\d{1,3}\\.)+(?:\\d{1,3})', ip_sets)\n \n return ip_list",
"def get_site_ip(domain: str = None) -> str:\n try:\n return socket.gethostbyname(domain)\n except Exception as ex:\n return 'error'",
"def ip_addresses(self):\n try:\n return socket.gethostbyaddr(self.fqdn)[-1]\n except socket.error as _:\n return ['127.0.0.1']",
"def _fallback_get_mx_domains(domain):\n try:\n query = dns.message.make_query(domain, dns.rdatatype.MX)\n answers = dns.query.udp(query, GOOGLE_DNS_IP).answer[0]\n return [a for a in answers if a.rdtype == dns.rdatatype.MX]\n except Exception:\n return []",
"def get_domain_ip_via_sni(self, path_tracefile, domain):\n packets = self.get_client_hello_packets(path_tracefile)\n for packet in packets:\n servername = self.get_client_hello_servername(packet)\n if servername == domain:\n ip = packet.getlayer(IP).dst\n return ip\n return -1",
"def ip_addresses(self) -> pulumi.Output[Sequence['outputs.IpMappingResponse']]:\n return pulumi.get(self, \"ip_addresses\")",
"def ipaddrs( host ):\n return socket.gethostbyaddr(host)[2][0]",
"def domainlist_reverseip(self, response):\n data = response.json()\n for ip in data['response']['ip_addresses']:\n for domain in ip['domain_names']:\n yield(domain.lower())",
"def reverse_ip(self, domain_or_ip, limit=None):\n params = {}\n if limit:\n params = {'limit':limit}\n if re.search('^(\\d{1,3}\\.){3}(\\d{1,3})$',domain_or_ip):\n uri = '/v1/{}/host-domains/'\n else:\n uri = '/v1/{}/reverse-ip/'\n return self.apiquery(uri.format(domain_or_ip), params)",
"def _get_ip_addresses(ip_addresses):\n ret = []\n for item in ip_addresses:\n ret.append(item)\n\n return ret",
"def _open_stack_get_ip_(srv):\n addr_info = srv.addresses\n for net in addr_info.keys():\n for addr in addr_info[net]:\n ip = addr['addr']\n return ip",
"def domain_lookup(domain):\n # Create the required data dictionary for Host/Reputation\n api_data = {\n 'host': domain\n }\n response = http_request(endpoint=HOST_REPUTE_API, data=api_data)\n\n if response.get('errorNo') != 0:\n return_error('API Returned, {}:{}'.format(response.get('errorNo'), response.get('errorMsg')))\n\n return response",
"def main(self):\n results = []\n for t in Config.APPHOSTS:\n custom_domain = t[\"custom_domain\"]\n heroku_host = t[\"heroku_host\"]\n result = self.ip_update(custom_domain, heroku_host)\n results.append(result)\n return results",
"def get_ips(self, oid):\n path = '/servers/%s/ips' % oid\n res = self.client.call(path, 'GET', data='', token=self.manager.identity.token)\n self.logger.debug('List ip addresses for server %s: %s' % \n (oid, truncate(res)))\n return res[0]",
"def GetExternalIp():\n h = httplib2.Http(tempfile.gettempdir(), timeout=10)\n url = 'http://whatismyip.akamai.com'\n resp, content = h.request(url, 'GET')\n if resp.status == 200:\n return content\n for provider in (UltraDNSAuth(), MyResolverInfo()):\n answer = provider.GetClientIp()\n if answer:\n return answer",
"def target_dns_ip_addresses(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"target_dns_ip_addresses\")",
"def target_dns_ip_addresses(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"target_dns_ip_addresses\")",
"def get_input_domains():\n df = pandas.read_excel(\"AutoScrapy/files/EBE21 - Top 100 Onlineshops to scrapp.ods\", engine=\"odf\")\n list_of_addresses = df['Domain'].to_list()\n list_of_addresses = [(\"http://\" + address) for address in list_of_addresses]\n print(list_of_addresses)\n return list_of_addresses"
] | [
"0.83984834",
"0.77371424",
"0.6798919",
"0.6587187",
"0.6285699",
"0.61892295",
"0.60301226",
"0.6003413",
"0.5995572",
"0.598294",
"0.5933315",
"0.5922509",
"0.59050184",
"0.5893697",
"0.5892688",
"0.5844683",
"0.584002",
"0.58137757",
"0.57702386",
"0.57578325",
"0.5728746",
"0.5707015",
"0.56450206",
"0.5641254",
"0.5634866",
"0.5623089",
"0.56070125",
"0.55793464",
"0.55777866",
"0.5552125"
] | 0.79961646 | 1 |
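The IP lookup above resolves guest MACs against the host's ARP cache by shelling out to `arp -an` (falling back to `ip neigh`). The parsing step is the interesting part; the sketch below builds the same MAC-to-IP table using the regex that appears in the `_parse_ip_table_arp` negative, applied to `arp -an` output. ARP output format differs across platforms, so treat the regex as Linux/BSD-flavoured.

```python
# Build a MAC -> [IP, ...] table from `arp -an` output.
import re
import subprocess
from collections import defaultdict

ARP_LINE = re.compile(r".*?\((.*?)\) at (.*?)\s+")

def parse_arp_output(output: str) -> dict:
    table = defaultdict(list)
    for line in output.splitlines():
        match = ARP_LINE.match(line)
        if match:
            ip_address, mac_address = match.groups()
            table[mac_address].append(ip_address)
    return dict(table)

if __name__ == "__main__":
    out = subprocess.run(["arp", "-an"], capture_output=True, text=True).stdout
    print(parse_arp_output(out))
```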
Parse network interface MAC addresses from the provided domain. | def _get_mac_addresses_for_domain(self, domain):
xml = domain.XMLDesc()
etree = ET.XML(xml)
elems = etree.findall("devices/interface[@type='network']/mac")
result = []
for elem in elems:
mac_address = elem.get("address")
result.append(mac_address)
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_mac_addr_table(self, cmd_output, mac_regex):\n lines = ensure_string(cmd_output).split(\"\\n\")\n\n arp_table = defaultdict(list)\n for line in lines:\n match = mac_regex.match(line)\n\n if not match:\n continue\n\n groups = match.groups()\n ip_address = groups[0]\n mac_address = groups[1]\n arp_table[mac_address].append(ip_address)\n\n return arp_table",
"def _get_ip_addresses_for_domain(self, domain):\n result = []\n\n if platform.system() != \"Linux\":\n # Only Linux is supported atm\n return result\n\n if \"///\" not in self._uri:\n # Only local libvirtd is supported atm\n return result\n\n mac_addresses = self._get_mac_addresses_for_domain(domain=domain)\n\n arp_table = {}\n try:\n cmd = [\"arp\", \"-an\"]\n child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, _ = child.communicate()\n arp_table = self._parse_ip_table_arp(arp_output=stdout)\n except OSError as e:\n if e.errno == 2:\n cmd = [\"ip\", \"neigh\"]\n child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n stdout, _ = child.communicate()\n arp_table = self._parse_ip_table_neigh(ip_output=stdout)\n\n for mac_address in mac_addresses:\n if mac_address in arp_table:\n ip_addresses = arp_table[mac_address]\n result.extend(ip_addresses)\n\n return result",
"def getmacaddrs():\n # Unpack just for the sake of being meaningful.\n ifaddrs, sockaddr_dl, sockaddr = PLATFORM_LOOKUP[PLATFORM]\n ptr = c_void_p(None)\n result = LIBC.getifaddrs(pointer(ptr))\n if result != 0:\n return {}\n ifa = ifaddrs.from_address(ptr.value)\n result = {}\n\n while True:\n name = ifa.ifa_name\n if name not in result:\n result[name] = []\n # Some interface (such as a TUN virtual network) doesn't give us\n # ifa_addr at all and we can usually skip them because they're hardly\n # relevant for our usage case.\n if ifa.ifa_addr:\n sa = sockaddr.from_address(ifa.ifa_addr)\n if sa.sa_family == AF_LINK:\n si = sockaddr_dl.from_address(ifa.ifa_addr)\n addr = \"%s\" % si\n if addr:\n result[name].append(addr)\n if ifa.ifa_next:\n ifa = ifaddrs.from_address(ifa.ifa_next)\n else:\n break\n\n LIBC.freeifaddrs(ptr)\n return result",
"def get_mac_address_table(self):\n\n mac_address_table = []\n command = '/interface bridge host print terse'\n\n output = self._send_command(command)\n\n for host in parse_terse_output(output):\n mac_address_table.append({\n 'mac': cast_mac(host.get('mac-address')),\n 'interface': host.get('interface'),\n 'vlan': -1,\n 'static': True if 'D' not in host.get('_flags') else False,\n 'active': True if 'X' not in host.get('_flags') else False,\n 'moves': -1,\n 'last_move': -1.0\n })\n\n return mac_address_table",
"def validate_and_normalize_mac(address):\n if not is_valid_mac(address):\n if constants.CLONE_ISO_MAC in address:\n # get interface name from the label\n intf_name = address.rsplit('-', 1)[1][1:]\n raise exception.ClonedInterfaceNotFound(intf=intf_name)\n else:\n raise exception.InvalidMAC(mac=address)\n return address.lower()",
"def scan(self):\n walk_result = utils.snmp_walk(self.host, self.credential,\n \"BRIDGE-MIB::dot1dTpFdbPort\")\n if not walk_result:\n return None\n\n mac_list = []\n for result in walk_result:\n if not result or result['value'] == str(0):\n continue\n temp = {}\n mac_numbers = result['iid'].split('.')\n temp['mac'] = self._get_mac_address(mac_numbers)\n temp['port'] = self._get_port(result['value'])\n temp['vlan'] = self._get_vlan_id(temp['port'])\n mac_list.append(temp)\n\n return mac_list",
"def find_my_IP_and_MAC():\n mac = ':'.join(re.findall('..', '%012x' % getnode()))\n # I write IP and not domain cause i want to save time.\n p = sr1(IP(dst=\"google.com\", ttl=0) / ICMP() / \"XXXXXXXXXXX\",verbose=0,timeout=5) #verbose = withuot output\n return mac,p.dst",
"def _parse_ip_table_arp(self, arp_output):\n arp_regex = re.compile(r\".*?\\((.*?)\\) at (.*?)\\s+\")\n return self._parse_mac_addr_table(arp_output, arp_regex)",
"def parse_ip_addr(data):\n # 2: eth0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 1500 qdisc pfifo_fast state UP qlen 1000\n # link/ether 52:54:00:a0:b9:b6 brd ff:ff:ff:ff:ff:ff\n # inet 10.133.58.56/20 brd 10.133.63.255 scope global eth0\n # valid_lft 85266sec preferred_lft 85266sec\n # inet6 2001:1bc8:100:6::f301/64 scope global\n # valid_lft forever preferred_lft forever\n for iface in _IP_ADDR_SPLIT_RE.split(data.strip()):\n if not iface:\n continue\n lines = [l.strip() for l in iface.splitlines()]\n info = {\n \"name\": lines.pop(0).partition(\":\")[0],\n \"ip-addresses\": [],\n \"hardware-address\": None,\n }\n for line in lines:\n words = line.split()\n if words[0].startswith(\"link/\") and len(words) >= 2:\n info[\"hardware-address\"] = words[1]\n elif words[0] in (\"inet\", \"inet6\"):\n addrtype = \"ipv6\" if words[0] == \"inet6\" else \"ipv4\"\n addr, _, prefix = words[1].partition(\"/\")\n if prefix == '':\n prefix = 128 if addrtype == \"ipv6\" else 32\n info[\"ip-addresses\"].append({\"ip-address-type\": addrtype, \"ip-address\": addr, \"prefix\": int(prefix)})\n yield info",
"def get_mac_address(self, result, host):\n if \"mac\" in result['scan'][host][\"addresses\"]:\n return result['scan'][host][\"addresses\"][\"mac\"]\n else:\n return \"\"",
"def _mac_addr(address):\n return ':'.join('%02x' % ord(b) for b in address)",
"def get_mac_addr():\n suffix = None\n while suffix is None:\n orig_suffix = input('Enter the remaining 4 character MAC address suffix (e.g. fa34): ')\n # remove all character spacers\n strip_spacers = orig_suffix.maketrans({x: None for x in ':-.'})\n suffix = orig_suffix.translate(strip_spacers)\n\n # check if it's a valid hex string\n invalid_hex = False\n try:\n int(suffix, 16)\n except ValueError:\n invalid_hex = True\n\n if len(suffix) != 4 or invalid_hex:\n print('Invalid MAC address suffix: %s' % orig_suffix)\n suffix = None\n\n mac_addr = MAC_ADDR_OUI + suffix\n mac_addr = '%s%s:%s%s:%s%s:%s%s:%s%s:%s%s' % tuple(mac_addr.lower())\n return mac_addr",
"def get_eos_mac(nbr, nbr_intf):\n if \"port-channel\" in nbr_intf.lower():\n # convert Port-Channel1 to po1\n shell_intf = \"po\" + nbr_intf[-1]\n else:\n # convert Ethernet1 to eth1\n shell_intf = \"eth\" + nbr_intf[-1]\n\n output = nbr['host'].command(\"ip addr show dev %s\" % shell_intf)\n # 8: Ethernet0: <BROADCAST,MULTICAST,UP,LOWER_UP> mtu 9100 ...\n # link/ether a6:69:05:fd:da:5f brd ff:ff:ff:ff:ff:ff\n\n mac = output['stdout_lines'][1].split()[1]\n return {'mac': mac, \"shell_intf\": shell_intf}",
"def get_mac_address():\n eth0_interface = 'eth0'\n addresses = netifaces.ifaddresses(eth0_interface)[netifaces.AF_LINK][0]\n mac_address = addresses['addr']\n return mac_address",
"def get_mac_addresses(self, task):\n raise exception.UnsupportedDriverExtension(\n driver=task.node.driver, extension='get_mac_addresses')",
"def possible_mac_addresses(interface):\n\n mac_addrs = []\n\n # In case of VLANs, just grab the parent interface\n if interface.interface_type == 'vlan':\n interface = interface.parent\n\n # Bonding/bridge: append the MACs of the physical interfaces\n # TODO: drop the public/bootable check once we decide how to send the extra\n # information to clients\n for slave in interface.all_slaves():\n if slave.mac and (slave.interface_type != \"public\" or slave.bootable):\n mac_addrs.append(slave.mac)\n\n # Handle physical interfaces, and bonding with a dedicated MAC\n # TODO: drop the public/bootable check once we decide how to send the extra\n # information to clients\n if interface.mac and (interface.interface_type != \"public\" or interface.bootable):\n mac_addrs.append(interface.mac)\n\n return mac_addrs",
"def arp_scan(subnet):\n\n answered = scapy.arping(subnet)[0]\n\n machines = []\n for i in answered:\n ip, mac = i[1].psrc, i[1].hwsrc\n try:\n host = socket.gethostbyaddr(i[1].psrc)[0]\n except Exception:\n host = \"??\"\n machines.append({\"ip\": ip, \"mac\": mac, \"host\": host})\n\n return machines",
"def mac_addr(address):\n return ':'.join('%02x' % compat_ord(b) for b in address)",
"def mac_addr(address):\n return ':'.join('%02x' % compat_ord(b) for b in address)",
"def mac_addr(address):\n return ':'.join('%02x' % compat_ord(b) for b in address)",
"def mac_addr(address):\n return ':'.join('%02x' % compat_ord(b) for b in address)",
"def arp_scan(interface: str, ip_range: str) -> List[str]:\n ans, unans = srp(Ether(dst='ff:ff:ff:ff:ff:ff') / ARP(pdst=ip_range), iface=interface, timeout=2, verbose=False)\n\n ip_addresses = []\n for req, res in ans:\n ip_addresses.append(req[ARP].pdst)\n\n return ip_addresses",
"def get_my_mac_set(iface_filter=None):\n\n out_set = set()\n if sys.platform.startswith(\"win\"):\n from scapy.arch.windows import NetworkInterface\n if type(iface_filter) == NetworkInterface:\n out_set.add(iface_filter.mac)\n\n for iface in sc.get_if_list():\n if iface_filter is not None and iface != iface_filter:\n continue\n try:\n mac = sc.get_if_hwaddr(iface)\n except Exception as e:\n continue\n else:\n out_set.add(mac)\n\n return out_set",
"def _get_mac_address(self, mac_numbers):\n\n mac = \"\"\n for num in mac_numbers:\n num = self._convert_to_hex(num)\n mac = ':'.join((mac, num))\n mac = mac[1:]\n return mac",
"def set_mac_address(self, iface):\n if os.path.exists(\"/sys/class/net/%s\" % iface):\n return open(\"/sys/class/net/%s/address\" % iface).read().strip()\n return \"none\"",
"def set_mac_address(self, iface):\n if os.path.exists(\"/sys/class/net/%s\" % iface):\n return open(\"/sys/class/net/%s/address\" % iface).read().strip()\n return \"none\"",
"def add_colons_to_mac(self, mac_addr):\n\t\ts = list()\n\t\tfor i in range(12/2): # mac_addr should always be 12 chars, we work in groups of 2 chars\n\t\t\ts.append(mac_addr[i*2:i*2+2])\n\t\tr = \":\".join(s)\n\t\treturn r",
"def mac_aton(str):\n macbytes = [int(i, 16) for i in str.split(':')]\n return struct.pack('6B', *macbytes)",
"def get_mac_address(ifname):\n try:\n return open('/sys/class/net/' + ifname + '/address') \\\n .readline().strip()\n except:\n SysTools.logger.error(\"Failed to get mac-address of %s\", ifname)\n return \"00:00:00:00:00:00\"",
"def get_network_device_mac(devices, user, passwd, base_api_url):\n network_device_mac_address = ''\n\n if devices:\n selected_network_device = generate_network_devices_menu(devices, purpose='DHCP')\n network_device_mac_address = get_mac_address(selected_network_device, base_api_url, user, passwd)\n\n if network_device_mac_address:\n logging.info('device {} mac address is {}'.format(selected_network_device, network_device_mac_address))\n \n return network_device_mac_address"
] | [
"0.5997182",
"0.5887761",
"0.55770844",
"0.54802185",
"0.5151093",
"0.5078019",
"0.5072644",
"0.5054249",
"0.5025796",
"0.5004441",
"0.4980744",
"0.4979718",
"0.4964348",
"0.49386275",
"0.4900247",
"0.48874563",
"0.48327222",
"0.4825868",
"0.4825868",
"0.4825868",
"0.4825868",
"0.48007947",
"0.4800467",
"0.47880268",
"0.4781393",
"0.4781393",
"0.47758192",
"0.4775488",
"0.4773709",
"0.47721717"
] | 0.76599824 | 0 |
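`_get_mac_addresses_for_domain` reads MACs straight out of the guest's XML definition; note that the XPath only matches `<interface type='network'>` devices, so bridged or direct interfaces are not covered. A self-contained sketch with libvirt-python and ElementTree (URI and guest name are placeholders):

```python
# Extract MAC addresses of network-type interfaces from a guest's XML.
import libvirt
from xml.etree import ElementTree as ET

conn = libvirt.open("qemu:///system")
domain = conn.lookupByName("my-guest")  # placeholder guest name
root = ET.fromstring(domain.XMLDesc())
macs = [mac.get("address")
        for mac in root.findall("devices/interface[@type='network']/mac")]
print(macs)
conn.close()
```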
Return libvirt domain object for the provided node. | def _get_domain_for_node(self, node):
domain = self.connection.lookupByUUIDString(node.uuid)
return domain | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_domain(self, name=None, domain_id=None):\n try:\n if name != None:\n domain = self.conn.lookupByName(name)\n elif domain_id != None:\n domain = self.conn.lookupByNamtoprettyxmle(domain_id)\n \n self.logger.debug('Get libvirt domain: %s' % name)\n return domain\n except libvirt.libvirtError, ex:\n self.logger.error(ex)\n raise VirtDomainMonitorError(ex)",
"def get_node_domain_otu(self,node):\n if node.clades:\n if node.clades[0].abu < node.clades[1].abu:\n node.domain_otu = self.get_node_domain_otu(node.clades[1]).domain_otu\n self.get_node_domain_otu(node.clades[0])\n else:\n node.domain_otu = self.get_node_domain_otu(node.clades[0]).domain_otu\n self.get_node_domain_otu(node.clades[1])\n return node",
"def setNodeDNSDomain(self,node,domain):\n post_data = {'search': str(domain)}\n data = self.connect('put',\"nodes/%s/dns\" % (node), post_data)\n return data",
"def domain(self, domain=None):\n\n return self.domain_class(apiobj=self, domainname=domain)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Domain':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = DomainArgs.__new__(DomainArgs)\n\n __props__.__dict__[\"access_policies\"] = None\n __props__.__dict__[\"advanced_options\"] = None\n __props__.__dict__[\"advanced_security_options\"] = None\n __props__.__dict__[\"arn\"] = None\n __props__.__dict__[\"cluster_config\"] = None\n __props__.__dict__[\"cognito_options\"] = None\n __props__.__dict__[\"domain_arn\"] = None\n __props__.__dict__[\"domain_endpoint\"] = None\n __props__.__dict__[\"domain_endpoint_options\"] = None\n __props__.__dict__[\"domain_endpoints\"] = None\n __props__.__dict__[\"domain_name\"] = None\n __props__.__dict__[\"ebs_options\"] = None\n __props__.__dict__[\"encryption_at_rest_options\"] = None\n __props__.__dict__[\"engine_version\"] = None\n __props__.__dict__[\"log_publishing_options\"] = None\n __props__.__dict__[\"node_to_node_encryption_options\"] = None\n __props__.__dict__[\"off_peak_window_options\"] = None\n __props__.__dict__[\"service_software_options\"] = None\n __props__.__dict__[\"snapshot_options\"] = None\n __props__.__dict__[\"software_update_options\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"vpc_options\"] = None\n return Domain(resource_name, opts=opts, __props__=__props__)",
"def getDomain(self, domain=None):\n if domain is None:\n domain = self.domain\n reply = self.rpc.getDomain(self.username,\n self.password,\n domain)\n if not isinstance(reply, dict):\n raise Exception(\"RPC returned error: \" + reply)\n return reply",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Domain':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = DomainArgs.__new__(DomainArgs)\n\n __props__.__dict__[\"app_network_access_type\"] = None\n __props__.__dict__[\"app_security_group_management\"] = None\n __props__.__dict__[\"auth_mode\"] = None\n __props__.__dict__[\"default_space_settings\"] = None\n __props__.__dict__[\"default_user_settings\"] = None\n __props__.__dict__[\"domain_arn\"] = None\n __props__.__dict__[\"domain_id\"] = None\n __props__.__dict__[\"domain_name\"] = None\n __props__.__dict__[\"domain_settings\"] = None\n __props__.__dict__[\"home_efs_file_system_id\"] = None\n __props__.__dict__[\"kms_key_id\"] = None\n __props__.__dict__[\"security_group_id_for_domain_boundary\"] = None\n __props__.__dict__[\"single_sign_on_managed_application_instance_id\"] = None\n __props__.__dict__[\"subnet_ids\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"url\"] = None\n __props__.__dict__[\"vpc_id\"] = None\n return Domain(resource_name, opts=opts, __props__=__props__)",
"def _domain(self):\n if self.__domain is None:\n self.__domain = Domain(\n definition='Need domain definition?',\n updatable='False',\n optional='False',\n )\n self._ident[self._domain_name] = self.__domain\n self._data_record.domain_ids = [self._domain_name,]\n return self.__domain",
"def getNodeDNS(self,node):\n data = self.connect('get','nodes/%s/dns' % (node),None)\n return data",
"def domain(self, id_or_name):\n return DomainCollection(self.request).find(id_or_name)",
"def get_domain(self) -> Domain:\n domain = Domain.empty()\n\n # If domain path is None, return an empty domain\n if not self._domain_path:\n return domain\n try:\n domain = Domain.load(self._domain_path)\n except InvalidDomain as e:\n rasa.shared.utils.io.raise_warning(\n f\"Loading domain from '{self._domain_path}' failed. Using \"\n f\"empty domain. Error: '{e}'\"\n )\n\n return domain",
"def GetDicomFromNode(self,node):\n storageNode=node.GetStorageNode()\n if storageNode is not None: # loaded via drag-drop\n filepath=storageNode.GetFullNameFromFileName()\n else: # loaded via DICOM browser\n instanceUIDs=node.GetAttribute('DICOM.instanceUIDs').split()\n filepath=slicer.dicomDatabase.fileForInstance(instanceUIDs[0])\n Dcm_tag=pydicom.dcmread(filepath)\n return Dcm_tag",
"def _get_domain_for_uuid(self, uuid):\n domain = self.connection.lookupByUUIDString(uuid)\n return domain",
"def _get_domain(self, doid):\n SQL = render_template(\"/\".join([self.template_path,\n 'get_domain.sql']),\n doid=doid)\n status, res = self.conn.execute_2darray(SQL)\n\n if not status:\n return False, internal_server_error(errormsg=res)\n if len(res['rows']) == 0:\n raise ObjectGone(self.not_found_error_msg('Domain'))\n\n return res['rows'][0]['schema'], res['rows'][0]['domain']",
"def generate_domain(self, entity, original_data):\n\n got_new_data = False\n\n #: if the domain is directly generated in record method, we just return it\n if isinstance(original_data, self.data_schema):\n got_new_data = True\n return got_new_data, original_data\n\n the_id = self.generate_domain_id(entity, original_data)\n\n #: optional way\n #: item = self.session.query(self.data_schema).get(the_id)\n\n items = get_data(\n data_schema=self.data_schema,\n session=self.session,\n provider=self.provider,\n entity_id=entity.id,\n filters=[self.data_schema.id == the_id],\n return_type=\"domain\",\n )\n\n if items and not self.force_update:\n self.logger.info(\"ignore the data {}:{} saved before\".format(self.data_schema, the_id))\n return got_new_data, None\n\n if not items:\n timestamp_str = original_data[self.get_original_time_field()]\n timestamp = None\n try:\n timestamp = to_pd_timestamp(timestamp_str)\n except Exception as e:\n self.logger.exception(e)\n\n if \"name\" in get_schema_columns(self.data_schema):\n domain_item = self.data_schema(\n id=the_id, code=entity.code, name=entity.name, entity_id=entity.id, timestamp=timestamp\n )\n else:\n domain_item = self.data_schema(id=the_id, code=entity.code, entity_id=entity.id, timestamp=timestamp)\n got_new_data = True\n else:\n domain_item = items[0]\n\n fill_domain_from_dict(domain_item, original_data, self.get_data_map())\n return got_new_data, domain_item",
"def get_domain(id):\n return query(WEB_EXAMPLE_BASE + f\"/classical/domain/{id}\")",
"def get_domain(self, rel_name):\n return self._declaration[rel_name].domain_type",
"def ex_get_node_by_name(self, name):\n domain = self._get_domain_for_name(name=name)\n node = self._to_node(domain=domain)\n return node",
"def _get_domain_for_name(self, name):\n domain = self.connection.lookupByName(name)\n return domain",
"def node_show(self, node):\n if node.instance_uuid:\n n = self.ironic_client.node.get_by_instance_uuid(\n node.instance_uuid)\n else:\n n = self.ironic_client.node.get(node.uuid)\n return n",
"def domain(self, domain):",
"def fastlydomain(args):\n pprint(api.domain(service_id, args[0], args[1]).attrs)",
"def domain( self ):\n raise NotImplementedError(\"domain\")",
"def getNode(self):\n node = Entity.getNode(self)\n node.tag = 'depentity'\n return(node)",
"def info(self):\n #try:\n if True:\n data = xml2dict(self._domain.XMLDesc(8))\n self.logger.debug('Get libvirt domain info: %s' % self._name)\n return data",
"def get_domain(self, row_id):\n cursor = self.connection.cursor()\n cursor.execute(\"\"\"\n SELECT domain FROM queries WHERE rowid=(?);\n \"\"\", (row_id,))\n return cursor.fetchone()[0]",
"def get_node_by_id(self, node_name: Hashable):\n return self._meta.get_node(node_name)",
"def get_tree_domain(tree, blacklist=_DOMAIN_BLACKLIST, get_domain=get_domain):\n href = get_base_href(tree)\n if href:\n return get_domain(href)\n return guess_domain(tree, blacklist, get_domain)",
"def lookup(self, domain_name, validate=True):\r\n try:\r\n domain = self.get_domain(domain_name, validate)\r\n except:\r\n domain = None\r\n return domain",
"def _get_domain(self):\n self.ensure_one()\n domain = []\n return domain"
] | [
"0.6646178",
"0.6326023",
"0.60190666",
"0.5995111",
"0.591567",
"0.5859711",
"0.58435315",
"0.57714146",
"0.5689236",
"0.5660096",
"0.564347",
"0.5614556",
"0.5580412",
"0.5555476",
"0.55462366",
"0.54826355",
"0.5459767",
"0.5392127",
"0.5384954",
"0.535872",
"0.53121495",
"0.5298819",
"0.5276818",
"0.5276094",
"0.5267642",
"0.52220863",
"0.5192951",
"0.51790464",
"0.5164744",
"0.5159255"
] | 0.80289376 | 0 |
Return libvirt domain object for the provided uuid. | def _get_domain_for_uuid(self, uuid):
domain = self.connection.lookupByUUIDString(uuid)
return domain | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ex_get_node_by_uuid(self, uuid):\n domain = self._get_domain_for_uuid(uuid=uuid)\n node = self._to_node(domain=domain)\n return node",
"def _get_domain(self, name=None, domain_id=None):\n try:\n if name != None:\n domain = self.conn.lookupByName(name)\n elif domain_id != None:\n domain = self.conn.lookupByNamtoprettyxmle(domain_id)\n \n self.logger.debug('Get libvirt domain: %s' % name)\n return domain\n except libvirt.libvirtError, ex:\n self.logger.error(ex)\n raise VirtDomainMonitorError(ex)",
"def _get_domain_for_node(self, node):\n domain = self.connection.lookupByUUIDString(node.uuid)\n return domain",
"def get_by_uuid(self, uuid):\n return self.get(uuid=uuid)",
"def by_uuid(cls, uuid):\n return dbsession.query(cls).filter_by(uuid=uuid).first()",
"def _get_domain(self, doid):\n SQL = render_template(\"/\".join([self.template_path,\n 'get_domain.sql']),\n doid=doid)\n status, res = self.conn.execute_2darray(SQL)\n\n if not status:\n return False, internal_server_error(errormsg=res)\n if len(res['rows']) == 0:\n raise ObjectGone(self.not_found_error_msg('Domain'))\n\n return res['rows'][0]['schema'], res['rows'][0]['domain']",
"def by_uuid(cls, _uuid):\n return dbsession.query(cls).filter_by(uuid=_uuid).first()",
"def by_uuid(cls, _uuid):\n return dbsession.query(cls).filter_by(uuid=_uuid).first()",
"def get_by_uuid(self, uuid, datastore=None):\n storage = self.storage(datastore)\n model = storage.get_by_uuid(uuid)\n # unless forcing ES datastore, check write storage if not found in read\n # if datastore == 'database' and storage is self.read:\n # Old is above - See C4-30\n # if not specifically specifying datastore=elasticsearch, always fall back to DB\n if not datastore == 'elasticsearch':\n if model is None:\n return self.write.get_by_uuid(uuid)\n return model",
"def api_object_domain(veil_cli):\n return VeilDomainExt(client=veil_cli,\n api_object_id='eafc39f3-ce6e-4db2-9d4e-1d93babcbe26')",
"def domain(self, domain=None):\n\n return self.domain_class(apiobj=self, domainname=domain)",
"def get_article(uuid):\n return Article.get(Article.uuid == uuid)",
"def get_uuid(self, obj):\n return IUUID(obj, None)",
"def get_uuid(self, obj):\n return IUUID(obj, None)",
"def get_device_by_uuid(cls, device_id):\n return cls.dbdriver.get_device_by_uuid(device_id)",
"def iroko_uuid_fetcher(record_uuid, data):\n # pid_field = current_app.config['PIDSTORE_RECID_FIELD']\n pid_field = 'id'\n return FetchedPID(\n provider=providers.IrokoUUIDProvider,\n pid_type=providers.IrokoUUIDProvider.pid_type,\n pid_value=str(data[pid_field]),\n )",
"def get_visitor(uuid):\n log.debug('Getting visitor by uuid(%s)', uuid)\n try:\n visitor = models.Visitor.objects.get(uuid=uuid)\n except Exception:\n visitor = models.Visitor()\n visitor.uuid = uuid\n visitor.save()\n return visitor",
"def get_domain(id):\n return query(WEB_EXAMPLE_BASE + f\"/classical/domain/{id}\")",
"def find(cls, uuid):\n entries = cls.objects.filter(uuid=uuid)\n if not entries:\n return None\n else:\n return entries.first()",
"def get_employee_by_uuid(uuid):\n employee = db.session.query(Employee).filter_by(uuid=uuid).first()\n if employee is None:\n raise ValueError('Invalid employee uuid')\n return employee",
"def getDomain(self, domain=None):\n if domain is None:\n domain = self.domain\n reply = self.rpc.getDomain(self.username,\n self.password,\n domain)\n if not isinstance(reply, dict):\n raise Exception(\"RPC returned error: \" + reply)\n return reply",
"def domain(self, id_or_name):\n return DomainCollection(self.request).find(id_or_name)",
"def get_by_uuid(self, table, uuid, **kwargs):\n if uuid is None:\n raise UndefinedUuidOrName(table)\n return self.query_one(table, WHERE={'uuid': uuid}, **kwargs)",
"def get_node(self, uuid, clean=True):\n if clean:\n uuid = ProcessNode.strip_uuid(uuid)\n return self._get_tree_queryset().get(uuid_full__startswith=uuid)",
"def get_device_by_uuid(self, device_id: str):\n return get_device_by_uuid(self.api_key, device_id)",
"def get(self, uuid):\n return self.__get_object(super(Pipelines, self).get(uuid))",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Domain':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = DomainArgs.__new__(DomainArgs)\n\n __props__.__dict__[\"app_network_access_type\"] = None\n __props__.__dict__[\"app_security_group_management\"] = None\n __props__.__dict__[\"auth_mode\"] = None\n __props__.__dict__[\"default_space_settings\"] = None\n __props__.__dict__[\"default_user_settings\"] = None\n __props__.__dict__[\"domain_arn\"] = None\n __props__.__dict__[\"domain_id\"] = None\n __props__.__dict__[\"domain_name\"] = None\n __props__.__dict__[\"domain_settings\"] = None\n __props__.__dict__[\"home_efs_file_system_id\"] = None\n __props__.__dict__[\"kms_key_id\"] = None\n __props__.__dict__[\"security_group_id_for_domain_boundary\"] = None\n __props__.__dict__[\"single_sign_on_managed_application_instance_id\"] = None\n __props__.__dict__[\"subnet_ids\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"url\"] = None\n __props__.__dict__[\"vpc_id\"] = None\n return Domain(resource_name, opts=opts, __props__=__props__)",
"def get_object_by_uuid_or_404(model, uuid_pk):\n if isinstance(uuid_pk, str):\n try:\n uuid.UUID(uuid_pk)\n except Exception as e:\n raise Http404(str(e))\n return get_object_or_404(model, pk=uuid_pk)",
"def _get_from_datastore( uuid ):\n return db.Query(Stats).filter('uuid =', uuid).get()",
"def get_primitive_instance_by_uuid(context, instance_uuid):\n instance = db.instance_get_by_uuid(context, instance_uuid)\n return jsonutils.to_primitive(instance)"
] | [
"0.65978694",
"0.6287508",
"0.62544394",
"0.61970353",
"0.60169226",
"0.57897735",
"0.57860833",
"0.57860833",
"0.5771408",
"0.5730028",
"0.5638017",
"0.5603881",
"0.55803794",
"0.55803794",
"0.55660874",
"0.55613136",
"0.55560535",
"0.5535921",
"0.54868644",
"0.5468099",
"0.5456465",
"0.54487544",
"0.5392285",
"0.53797406",
"0.53689474",
"0.53362364",
"0.53332865",
"0.53317",
"0.53013444",
"0.52986187"
] | 0.8054554 | 0 |
Return libvirt domain object for the provided name. | def _get_domain_for_name(self, name):
domain = self.connection.lookupByName(name)
return domain | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_domain(self, name=None, domain_id=None):\n try:\n if name != None:\n domain = self.conn.lookupByName(name)\n elif domain_id != None:\n domain = self.conn.lookupByNamtoprettyxmle(domain_id)\n \n self.logger.debug('Get libvirt domain: %s' % name)\n return domain\n except libvirt.libvirtError, ex:\n self.logger.error(ex)\n raise VirtDomainMonitorError(ex)",
"def domain(self, id_or_name):\n return DomainCollection(self.request).find(id_or_name)",
"def lookup(self, domain_name, validate=True):\n try:\n domain = self.get_domain(domain_name, validate)\n except:\n domain = None\n return domain",
"def lookup(self, domain_name, validate=True):\r\n try:\r\n domain = self.get_domain(domain_name, validate)\r\n except:\r\n domain = None\r\n return domain",
"def create_or_show_domain(name):\n manager = get_manager()\n domain_id = manager.resolve_domain_id(name)\n if domain_id:\n log(\"Domain '%s' already exists.\" % name, level=DEBUG)\n else:\n manager.create_domain(domain_name=name,\n description='Created by Juju')\n log(\"Created new domain: %s\" % name, level=DEBUG)\n domain_id = manager.resolve_domain_id(name)\n return domain_id",
"def __getattr__(self, name):\n if name in self.domains:\n return self.domains[name]\n\n raise AttributeError('No domain named %s found.' % name)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Domain':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = DomainArgs.__new__(DomainArgs)\n\n __props__.__dict__[\"app_network_access_type\"] = None\n __props__.__dict__[\"app_security_group_management\"] = None\n __props__.__dict__[\"auth_mode\"] = None\n __props__.__dict__[\"default_space_settings\"] = None\n __props__.__dict__[\"default_user_settings\"] = None\n __props__.__dict__[\"domain_arn\"] = None\n __props__.__dict__[\"domain_id\"] = None\n __props__.__dict__[\"domain_name\"] = None\n __props__.__dict__[\"domain_settings\"] = None\n __props__.__dict__[\"home_efs_file_system_id\"] = None\n __props__.__dict__[\"kms_key_id\"] = None\n __props__.__dict__[\"security_group_id_for_domain_boundary\"] = None\n __props__.__dict__[\"single_sign_on_managed_application_instance_id\"] = None\n __props__.__dict__[\"subnet_ids\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"url\"] = None\n __props__.__dict__[\"vpc_id\"] = None\n return Domain(resource_name, opts=opts, __props__=__props__)",
"def get(resource_name: str,\n id: pulumi.Input[str],\n opts: Optional[pulumi.ResourceOptions] = None) -> 'Domain':\n opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))\n\n __props__ = DomainArgs.__new__(DomainArgs)\n\n __props__.__dict__[\"access_policies\"] = None\n __props__.__dict__[\"advanced_options\"] = None\n __props__.__dict__[\"advanced_security_options\"] = None\n __props__.__dict__[\"arn\"] = None\n __props__.__dict__[\"cluster_config\"] = None\n __props__.__dict__[\"cognito_options\"] = None\n __props__.__dict__[\"domain_arn\"] = None\n __props__.__dict__[\"domain_endpoint\"] = None\n __props__.__dict__[\"domain_endpoint_options\"] = None\n __props__.__dict__[\"domain_endpoints\"] = None\n __props__.__dict__[\"domain_name\"] = None\n __props__.__dict__[\"ebs_options\"] = None\n __props__.__dict__[\"encryption_at_rest_options\"] = None\n __props__.__dict__[\"engine_version\"] = None\n __props__.__dict__[\"log_publishing_options\"] = None\n __props__.__dict__[\"node_to_node_encryption_options\"] = None\n __props__.__dict__[\"off_peak_window_options\"] = None\n __props__.__dict__[\"service_software_options\"] = None\n __props__.__dict__[\"snapshot_options\"] = None\n __props__.__dict__[\"software_update_options\"] = None\n __props__.__dict__[\"tags\"] = None\n __props__.__dict__[\"vpc_options\"] = None\n return Domain(resource_name, opts=opts, __props__=__props__)",
"def ex_get_node_by_name(self, name):\n domain = self._get_domain_for_name(name=name)\n node = self._to_node(domain=domain)\n return node",
"def _find_domain(self, domain_name: str) -> digitalocean.Domain:\n\n domain_name_guesses = dns_common.base_domain_name_guesses(domain_name)\n\n domains = self.manager.get_all_domains()\n\n for guess in domain_name_guesses:\n matches = [domain for domain in domains if domain.name == guess]\n\n if matches:\n domain = matches[0]\n logger.debug('Found base domain for %s using name %s', domain_name, guess)\n return domain\n\n raise errors.PluginError(f'Unable to determine base domain for {domain_name} using names: '\n f'{domain_name_guesses}.')",
"def GetDomain(self, domainName):\n\n response = self.client.http_get(\"/v4/domains/%s\" % domainName)\n\n return parse_response(response, Domain)",
"def create_domain(self, domain_name):\r\n params = {'DomainName':domain_name}\r\n d = self.get_object('CreateDomain', params, Domain)\r\n d.name = domain_name\r\n return d",
"def create_domain(self, domain_name):\n params = {'DomainName': domain_name}\n d = self.get_object('CreateDomain', params, Domain)\n d.name = domain_name\n return d",
"def get(cls, subdomain, name):\n return cls.get_by_key_name(subdomain + ':' + name)",
"def get_domain(self, rel_name):\n return self._declaration[rel_name].domain_type",
"def get_obj_by_name(name: str) -> Any:\r\n module, obj_name = Onrolux.get_module_from_obj_name(name)\r\n return get_obj_from_module(module, obj_name)",
"def get_by_name(name):\n return database.get_all(Domain, name, field=\"name\").all()",
"def create_domain_name(self, name):\n return (\"%s.%s.%s\" % (name, \"net\", self.domain)).lower()",
"def get_domain(id):\n return query(WEB_EXAMPLE_BASE + f\"/classical/domain/{id}\")",
"def by_name(name, create_user=True):\n return get_obj_by_name(OBJT_HOST, name, create_user)",
"def find_domain(self):\n for network in self.network_set.all():\n if network.site:\n expected_name = \"{0}.{1}.mozilla.com\".format(self.name,\n network.site.get_site_path())\n try:\n domain = Domain.objects.get(name=expected_name)\n except ObjectDoesNotExist, e:\n continue\n return domain.name\n\n return None",
"def domain(self, domain=None):\n\n return self.domain_class(apiobj=self, domainname=domain)",
"def get_object(self, name):\n try:\n return self.data['objects'][normalize_object_name(name)]\n except KeyError:\n return None",
"def get_object(self, name):\n return self._internal.objects[name]",
"def get_domain_name(self, DomainName: str) -> Dict:\n pass",
"def get_service_by_domain_name(self, domain_name):\n try:\n service_details = self.storage_controller\\\n .get_service_details_by_domain_name(domain_name)\n if service_details is None:\n # as per latest change, get_service_details_by_domain_name\n # will return None if the service_details can not be found\n # for this domain\n raise LookupError\n except Exception:\n raise LookupError(u'Domain {0} does not exist'.format(\n domain_name))\n return service_details",
"def _get_real_object(self, name):\n name = name if isinstance(name, str) else name.name\n for obj in self._objects:\n if name == obj.name:\n return obj\n else:\n raise ValueError(\"Cannot retrieve object. Unknown name {}. \".format(name))",
"def get_by_name(cls, name):\n return cls.query.filter(cls.name == name).first()",
"def _get_domain(self, doid):\n SQL = render_template(\"/\".join([self.template_path,\n 'get_domain.sql']),\n doid=doid)\n status, res = self.conn.execute_2darray(SQL)\n\n if not status:\n return False, internal_server_error(errormsg=res)\n if len(res['rows']) == 0:\n raise ObjectGone(self.not_found_error_msg('Domain'))\n\n return res['rows'][0]['schema'], res['rows'][0]['domain']",
"def get_domain(self) -> Domain:\n domain = Domain.empty()\n\n # If domain path is None, return an empty domain\n if not self._domain_path:\n return domain\n try:\n domain = Domain.load(self._domain_path)\n except InvalidDomain as e:\n rasa.shared.utils.io.raise_warning(\n f\"Loading domain from '{self._domain_path}' failed. Using \"\n f\"empty domain. Error: '{e}'\"\n )\n\n return domain"
] | [
"0.7728729",
"0.6781407",
"0.6676108",
"0.66732156",
"0.64450073",
"0.6421315",
"0.6407042",
"0.6315708",
"0.62968487",
"0.62832683",
"0.62424284",
"0.6150704",
"0.60960394",
"0.6010242",
"0.5997326",
"0.5948612",
"0.58892924",
"0.5876968",
"0.5854081",
"0.5805325",
"0.5765809",
"0.57580274",
"0.5748801",
"0.5665114",
"0.56637686",
"0.56634665",
"0.5647462",
"0.56467265",
"0.56432796",
"0.56330895"
] | 0.7796097 | 0 |
Sets up the regexp for parsing out IP addresses from the 'arp an' command and pass it along to the parser function. | def _parse_ip_table_arp(self, arp_output):
arp_regex = re.compile(r".*?\((.*?)\) at (.*?)\s+")
return self._parse_mac_addr_table(arp_output, arp_regex) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def callback(self, pkt):\n if ARP in pkt:\n self.parse_ip(pkt.sprintf(\"%ARP.psrc%\"))\n if TCP in pkt or UDP in pkt:\n self.parse_ip(pkt.sprintf(\"%IP.src%\"))\n self.parse_ip(pkt.sprintf(\"%IP.dst%\"))",
"def arp_parse(data):\n\t# Iteratize pkt\n\tpkt = packet.Packet(data)\n\ti = iter(pkt)\n\teth_pkt = next(i)\n\t\t# Ensure it's an ethernet frame.\n\tassert isinstance(eth_pkt, ethernet.ethernet)\n\n\tarp_pkt = next(i)\n\tif not isinstance(arp_pkt, arp.arp):\n\t\traise ARPPacket.ARPUnknownFormat()\n\n\tif arp_pkt.opcode not in (ARP_REQUEST, ARP_REPLY):\n\t\traise ARPPacket.ARPUnknownFormat(\n\t\t\tmsg='unsupported opcode %d' % arp_pkt.opcode)\n\n\tif arp_pkt.proto != ETH_TYPE_IP:\n\t\traise ARPPacket.ARPUnknownFormat(\n\t\t\tmsg='unsupported arp ethtype 0x%04x' % arp_pkt.proto)\n\n\treturn arp_pkt",
"def _parse_mac_addr_table(self, cmd_output, mac_regex):\n lines = ensure_string(cmd_output).split(\"\\n\")\n\n arp_table = defaultdict(list)\n for line in lines:\n match = mac_regex.match(line)\n\n if not match:\n continue\n\n groups = match.groups()\n ip_address = groups[0]\n mac_address = groups[1]\n arp_table[mac_address].append(ip_address)\n\n return arp_table",
"def arping(iprange=\"10.0.1.0/24\"):\n\n conf.verb=0\n ans,unans=srp(Ether(dst=\"ff:ff:ff:ff:ff:ff\")/ARP(pdst=iprange),\n timeout=2)\n\n collection = []\n for snd, rcv in ans:\n result = rcv.sprintf(r\"%ARP.psrc% %Ether.src%\").split()\n collection.append(result)\n return collection",
"def address_regex(self) -> Any:",
"def resolv_mac(self, row, ip):\n\n\t\tself.icmp_probe(ip)\n\n\t\tcmd = 'arp -a %s' % ip\n\t\tp = Popen(cmd, shell=True, stdin=PIPE, stderr=PIPE, stdout=PIPE)\n\t\tres = p.stdout.read()\n\n\t\tres = res.decode().strip('\\r\\n')\n\n\t\tmac = ''\n\t\tif 'No ARP' not in res:\n\t\t\tres = res.strip('\\r\\n').replace('\\r', '').split('\\n')[-1:][0].strip(' ').split(' ')\n\t\t\tfine = []\n\t\t\tfor i in res:\n\t\t\t\tif len(i) > 5:\n\t\t\t\t\tfine.append(i)\n\t\t\tmac = (fine[1] if (len(fine) == 3) else 'unknown')\n\t\t\t\n\t\telse:\n\t\t\tmac = 'unknown'\n\n\t\tself.mac_resolved.emit(row, mac)",
"def arping(ipaddress=\"10.0.1.1\"):\n\n #Assuming use of arping on Red Hat Linux\n p = subprocess.Popen(\"/usr/sbin/arping -c 2 %s\" % ipaddress, shell=True,\n stdout=subprocess.PIPE)\n out = p.stdout.read()\n result = out.split()\n #pattern = re.compile(\":\")\n for item in result:\n if ':' in item:\n print(item)",
"def arp_announce(self):\n pass",
"def parse_aprs (packet):\n\n print (packet)\n if len(packet) == 0:\n return\n\n chan = ''\n # Split into address and information parts.\n # There could be a leading '[n]' with a channel number.\n m = re.search (r'^(\\[.+\\] *)?([^:]+):(.+)$', packet)\n if m:\n chan = m.group(1)\t# Still enclosed in [].\n addrs = m.group(2)\n info = m.group(3)\n #print ('<>'+addrs+'<>'+info+'<>')\n\n if info[0] == '}':\n # Unwrap third party traffic format\n # Preserve any channel.\n if chan:\n parse_aprs (chan + info[1:])\n else:\n parse_aprs (info[1:])\n elif info[0:3] == '{DE':\n # APRS \"user defined data\" format for EAS.\n #print ('Process \"message\" - ' + info)\n process_eas (chan, info[3:])\n else:\n print ('Not APRS \"user defined data\" format - ' + info)\n else:\n print ('Could not split into address & info parts - ' + packet)",
"def MatchIpAddressInArpPackets(self):\n return self._get_attribute('matchIpAddressInArpPackets')",
"def arp_scan(interface: str, ip_range: str) -> List[str]:\n ans, unans = srp(Ether(dst='ff:ff:ff:ff:ff:ff') / ARP(pdst=ip_range), iface=interface, timeout=2, verbose=False)\n\n ip_addresses = []\n for req, res in ans:\n ip_addresses.append(req[ARP].pdst)\n\n return ip_addresses",
"def prepare_regexps(self):\r\n print(\"Preparing regular expressions for this session.\")\r\n privmsg_parse = re.compile(\"\")",
"def test_ignore_non_arp_packets(self):\n packet = IP(dst='www.apple.com') / TCP(dport=80) / Raw(b'test')\n\n chef = ARPChef()\n dumpling = chef.packet_handler(packet)\n\n assert chef.ip_mac == {}\n assert dumpling is None",
"def parse_ip(self, ip):\n if not ip in self.ip_list:\n try:\n ip_address = ipaddress.ip_address(ip)\n use = not (\n ip_address.is_multicast or ip_address.is_unspecified or ip_address.is_reserved or ip_address.is_loopback or ip_address.is_link_local)\n if use and (self.include_public or ip_address.is_private):\n self.new_ip(ip)\n network = ipaddress.IPv4Network(\"{}/{}\".format(ip,\n self.netmask), strict=False)\n self.new_range(str(network))\n except ValueError:\n pass",
"def test_packet_handler_arp_request(self):\n packet = Ether() / ARP(op='who-has')\n arp = packet[ARP]\n\n chef = ARPChef()\n dumpling = chef.packet_handler(packet)\n\n assert dumpling == {\n 'operation': 'request',\n 'src_hw': arp.hwsrc,\n 'src_ip': arp.psrc,\n 'dst_hw': arp.hwdst,\n 'dst_ip': arp.pdst,\n 'time': arp.time,\n 'notes': None,\n }\n\n assert chef.ip_mac == {}",
"def get_arp_table(self, vrf=\"\"):\n\n arp_table = []\n output = self._send_command('/ip arp print terse')\n\n arps = parse_terse_output(output)\n\n for arp in arps:\n if arp.get('mac-address'):\n arp_table.append({\n 'interface': arp.get('interface'),\n 'mac': cast_mac(arp.get('mac-address')),\n 'ip': arp.get('address'),\n 'age': -1.0,\n })\n\n return arp_table",
"def main():\n\n p = optparse.OptionParser(description=' Finds Mac Address of IP address(es)',\n prog='pyarping',\n version='pyarping 0.1',\n usage='%prog [10.0.1.1 or 10.0.1.0/24]')\n\n options, arguments = p.parse_args()\n if len(arguments) == 1:\n values = arping(iprange=arguments)\n for ip, mac in values:\n print ip, mac\n else:\n p.print_help()",
"def scan(ip):\n arp_request = scapy.ARP(pdst=ip)\n broadcast = scapy.Ether(dst=\"ff:ff:ff:ff:ff:ff\")\n arp_request_broadcast = broadcast/arp_request\n answered_list = scapy.srp(arp_request_broadcast , timeout = 1, verbose=False)[0]\n target_list=[]\n for element in answered_list:\n target_dict = {\"ip\":element[1].psrc, \"mac\":element[1].hwsrc}\n target_list.append(target_dict)\n return target_list",
"def handle_filter(packets, arg, arguments):\r\n matched_packets = []\r\n if arg == \"host\":\r\n if len(arguments) == 0:\r\n print(\"A host IP address should be followed by the host command.\")\r\n sys.exit()\r\n else:\r\n # ip address here\r\n arg = arguments.popleft()\r\n for pkt in packets:\r\n dest_ip = pkt[1][10]\r\n src_ip = pkt[1][9]\r\n if arg == dest_ip or arg == src_ip:\r\n matched_packets.append(pkt)\r\n elif arg == \"ip\":\r\n for pkt in packets:\r\n if str(pkt[0][3]) == \"0800\":\r\n matched_packets.append(pkt)\r\n elif arg == \"port\":\r\n if len(arguments) == 0:\r\n print(\"\\\"port\\\" cannot be the last argument.\")\r\n sys.exit()\r\n else:\r\n # port number\r\n arg = arguments.popleft()\r\n\r\n for pkt in packets:\r\n if pkt[1][7] == 6 or pkt[1][7] == 17:\r\n if str(pkt[2][0]) == arg or str(pkt[2][1]) == arg:\r\n matched_packets.append(pkt)\r\n\r\n elif arg == \"tcp\":\r\n for pkt in packets:\r\n if pkt[1][7] == 6:\r\n matched_packets.append(pkt)\r\n elif arg == \"udp\":\r\n for pkt in packets:\r\n if pkt[1][7] == 17:\r\n matched_packets.append(pkt)\r\n elif arg == \"icmp\":\r\n for pkt in packets:\r\n if pkt[1][7] == 1:\r\n matched_packets.append(pkt)\r\n elif arg == \"net\":\r\n if len(arguments) == 0:\r\n print(\"\\\"net net\\\" is required. \")\r\n sys.exit()\r\n else:\r\n # ip prefix\r\n arg = arguments.popleft()\r\n if len(arg.split(\".\")) != 4:\r\n print(\"Please enter a valid ip address format. (x.x.x.x)\")\r\n sys.exit()\r\n prefix_length = 0\r\n length = len(arg)\r\n if arg == \"0.0.0.0\":\r\n prefix_length = 0\r\n elif arg[length - 6:length] == \".0.0.0\":\r\n prefix_length = length - 6\r\n elif arg[length - 4:length] == \".0.0\":\r\n prefix_length = length - 4\r\n elif arg[length - 2:length] == \".0\":\r\n prefix_length = length - 2\r\n else:\r\n prefix_length = length\r\n\r\n for pkt in packets:\r\n if pkt[1][9][0:prefix_length] == arg[0:prefix_length] or pkt[1][10][0:prefix_length] == \\\r\n arg[0:prefix_length]:\r\n matched_packets.append(pkt)\r\n\r\n elif arg == \"not\":\r\n if len(arguments) == 0:\r\n print(\"\\\"not\\\" cannot be the last argument.\")\r\n sys.exit()\r\n else:\r\n arg = arguments.popleft()\r\n if arg == \"host\":\r\n if len(arguments) == 0:\r\n print(\"A host IP address should be followed by the host command.\")\r\n sys.exit()\r\n else:\r\n # ip address here\r\n arg = arguments.popleft()\r\n for pkt in packets:\r\n dest_ip = pkt[1][10]\r\n src_ip = pkt[1][9]\r\n if arg != dest_ip and arg != src_ip:\r\n matched_packets.append(pkt)\r\n elif arg == \"ip\":\r\n for pkt in packets:\r\n if str(pkt[0][3]) != \"0800\":\r\n matched_packets.append(pkt)\r\n elif arg == \"port\":\r\n if len(arguments) == 0:\r\n print(\"\\\"port\\\" cannot be the last argument.\")\r\n sys.exit()\r\n else:\r\n # port number\r\n arg = arguments.popleft()\r\n for pkt in packets:\r\n if pkt[1][7] == 6 or pkt[1][7] == 17:\r\n if str(pkt[2][0]) != arg and str(pkt[2][1]) != arg:\r\n matched_packets.append(pkt)\r\n elif arg == \"tcp\":\r\n for pkt in packets:\r\n if pkt[1][7] != 6:\r\n matched_packets.append(pkt)\r\n elif arg == \"udp\":\r\n for pkt in packets:\r\n if pkt[1][7] != 17:\r\n matched_packets.append(pkt)\r\n elif arg == \"icmp\":\r\n for pkt in packets:\r\n if pkt[1][7] != 1:\r\n matched_packets.append(pkt)\r\n elif arg == \"net\":\r\n if len(arguments) == 0:\r\n print(\"\\\"net net\\\" is required. \")\r\n sys.exit()\r\n else:\r\n # ip prefix\r\n arg = arguments.popleft()\r\n if len(arg.split(\".\")) != 4:\r\n print(\"Please enter a valid ip address format. 
(x.x.x.x)\")\r\n sys.exit()\r\n prefix_length = 0\r\n\r\n length = len(arg)\r\n if arg == \"0.0.0.0\":\r\n prefix_length = 0\r\n\r\n elif arg[length - 6:length] == \".0.0.0\":\r\n\r\n prefix_length = length - 6\r\n elif arg[length - 4:length] == \".0.0\":\r\n prefix_length = length - 4\r\n elif arg[length - 2:length] == \".0\":\r\n prefix_length = length - 2\r\n else:\r\n prefix_length = length\r\n for pkt in packets:\r\n if pkt[1][9][0:prefix_length] != arg[0:prefix_length] and pkt[1][10][0:prefix_length] != \\\r\n arg[0:prefix_length]:\r\n matched_packets.append(pkt)\r\n\r\n return matched_packets, arg",
"def handle_arp_packet(self, pkt):\r\n # Check that the protocol type is 0x9999 - Hardcoded and made up number.\r\n if pkt[YOARP].ptype == 0x9999:\r\n # Check if the operation is a question\r\n if pkt[YOARP].op == 1:\r\n # Answer for ARPs that are directed for us\r\n # (if we already registered a YO address)\r\n if self.yo_addr:\r\n if pkt[YOARP].pdst == self.yo_addr:\r\n # NOTE: This is really weird - pkt.src. I'd expect pkt[Ether].src but it just wouldn't\r\n # work. So this does.\r\n # For an explanation of the next two lines please refer to the \"register\" function.\r\n self.prints(\"Answering ARP from %s\" % pkt[YOARP].psrc, 3)\r\n arp_answer = Ether(src=self.src_mac, dst=pkt.src) / YOARP(psrc=self.yo_addr, pdst=pkt[YOARP].psrc,\r\n op=2, hwsrc=self.src_mac,\r\n hwdst=pkt[YOARP].hwsrc)\r\n # sendp(arp_answer)\r\n self.L2socket.send(arp_answer)\r\n\r\n\r\n # Passive cache ARPs\r\n # This is bad on purpose so that attacks can get interesting (Even though ARP attacks are\r\n # not a part of Gvahim curriculum, so don't exploit this!).\r\n # Unless we already have something cached (which should be 120 seconds), we simply check if\r\n # op is 2 (answer) and can inject pretty much whatever we want. This is a serious implementation bug.\r\n # What if we inject 00:00:00:00:00:00 or ff:ff:ff:ff:ff:ff? What if we hijack our own IP?\r\n # That's where things can get messy.\r\n\r\n if pkt[YOARP].op == 2:\r\n # Check against scapy's internal yoarp_cache.\r\n mac = conf.netcache.yoarp_cache.get(pkt[YOARP].psrc)\r\n if mac is None:\r\n conf.netcache.yoarp_cache[pkt[YOARP].psrc] = pkt[YOARP].hwsrc",
"def get_arp_table():\n IP = ''\n login = ''\n password = ''\n telnet = pexpect.spawn('telnet {}'.format(IP), timeout=30)\n telnet.expect('Username:')\n telnet.sendline(login)\n telnet.expect('Password:')\n telnet.sendline(password)\n telnet.expect('#')\n telnet.sendline('terminal length 0')\n telnet.expect('#')\n telnet.sendline('show arp')\n telnet.expect('#')\n arp_table = telnet.before.decode('utf-8')\n telnet.close()\n return arp_table",
"def integrated_address_regex(self) -> Any:",
"def _arp(ip_address):\n cmd = ['arp', '-n', ip_address]\n arp = subprocess.Popen(cmd, stdout=subprocess.PIPE)\n out, _ = arp.communicate()\n match = re.search(r'(([0-9A-Fa-f]{1,2}\\:){5}[0-9A-Fa-f]{1,2})', str(out))\n if match:\n return match.group(0)\n _LOGGER.info('No MAC address found for %s', ip_address)\n return None",
"def test_tagged_arp_pkt(ptfadapter, duthosts, rand_one_dut_hostname,\n rand_selected_dut, tbinfo, ports_list): # noqa F811\n duthost = duthosts[rand_one_dut_hostname]\n router_mac = duthost.facts['router_mac']\n vlan_ports_list = running_vlan_ports_list(duthosts, rand_one_dut_hostname, rand_selected_dut, tbinfo, ports_list)\n for vlan_port in vlan_ports_list:\n port_index = vlan_port[\"port_index\"][0]\n # Send GARP packets to switch to populate the arp table with dummy MACs for each port\n # Totally 10 dummy MACs for each port, send 1 packet for each dummy MAC\n # ARP table will be cleaned up before each iteration, so there won't be any conflict MAC and IP\n dummy_macs = ['{}:{:02x}:{:02x}'.format(DUMMY_MAC_PREFIX, port_index & 0xFF, i+1)\n for i in range(DUMMY_ARP_COUNT)]\n dummy_ips = ['{}.{:d}.{:d}'.format(DUMMY_IP_PREFIX, port_index & 0xFF, i+1)\n for i in range(DUMMY_ARP_COUNT)]\n for permit_vlanid in map(int, vlan_port[\"permit_vlanid\"]):\n logger.info('Test ARP: interface %s, VLAN %u' % (vlan_port[\"dev\"], permit_vlanid))\n # Perform ARP clean up\n arp_cleanup(duthost)\n for i in range(DUMMY_ARP_COUNT):\n pkt = build_arp_packet(permit_vlanid, dummy_macs[i], router_mac, dummy_ips[i])\n logger.info(\"Send tagged({}) packet from {} ...\".format(permit_vlanid, port_index))\n testutils.send(ptfadapter, port_index, pkt)\n\n try:\n res = duthost.command('show arp')\n assert res['rc'] == 0\n logger.info('\"show arp\" output on DUT:\\n{}'.format(pprint.pformat(res['stdout_lines'])))\n\n arp_cnt = 0\n for arp_entry in res['stdout_lines']:\n # Address MacAddress Iface Vlan\n items = arp_entry.split()\n if len(items) != 4:\n continue\n # Vlan must be number\n if not items[3].isdigit():\n continue\n arp_cnt += 1\n ip = items[0]\n mac = items[1]\n ifname = items[2]\n vlan_id = int(items[3])\n assert ip in dummy_ips\n assert mac in dummy_macs\n # 'show arp' command gets iface from FDB table,\n # if 'show arp' command was earlier than FDB table update, ifname would be '-'\n if ifname == '-':\n logger.info('Ignore unknown iface...')\n else:\n assert ifname == vlan_port[\"dev\"]\n assert vlan_id == permit_vlanid\n assert arp_cnt == DUMMY_ARP_COUNT, \"Expect {} entries, but {} found\".format(DUMMY_ARP_COUNT, arp_cnt)\n except Exception as detail:\n logger.error(\"Except: {}\".format(detail))\n raise",
"def _detect_ap_dynamic_addresses(self, ap_mac_list):\n lease_data = self.components['LinuxServer'].get_dhcp_leases()\n map = {}\n ip = mac = start_time = None\n for line in lease_data:\n l = line.split()\n if l[0] == \"lease\":\n ip = l[1]\n\n elif l[0] == \"hardware\":\n mac = l[2].strip(\";\")\n\n elif l[0] == \"starts\":\n start_time_str = \" \".join(l[2:]).strip(\";\")\n start_time = time.mktime(time.strptime(start_time_str, \"%Y/%m/%d %H:%M:%S\"))\n\n if ip and mac and start_time:\n if map.has_key(mac):\n if map[mac][0] < start_time:\n map[mac] = (start_time, ip)\n\n else:\n map[mac] = (start_time, ip)\n\n ip = mac = start_time = None\n\n for ap_mac in ap_mac_list:\n if map.has_key(ap_mac.lower()):\n self.mac_to_ip[ap_mac.lower()] = map[ap_mac.lower()][1]\n\n else:\n raise Exception(\"IP entry of the MAC %s was not found in the \\\n dhcpd.leases file\" % ap_mac.lower())",
"def test_packet_handler_arp_reply_new_ip(self):\n packet = Ether() / ARP(op='is-at')\n arp = packet[ARP]\n\n chef = ARPChef()\n\n # Configure the ip_mac struct to think it's already seen the source.\n chef.ip_mac = {\n arp.psrc: 'old_ip',\n }\n\n dumpling = chef.packet_handler(packet)\n\n # We should have updated the ip_mac structure with the new ip address.\n assert chef.ip_mac[arp.psrc] == arp.hwsrc\n\n # Check dumpling payload, including 'notes'.\n assert dumpling == {\n 'operation': 'reply',\n 'src_hw': arp.hwsrc,\n 'src_ip': arp.psrc,\n 'dst_hw': arp.hwdst,\n 'dst_ip': arp.pdst,\n 'time': arp.time,\n 'notes': 'source device has new IP address',\n }",
"def arp_scan(subnet):\n\n answered = scapy.arping(subnet)[0]\n\n machines = []\n for i in answered:\n ip, mac = i[1].psrc, i[1].hwsrc\n try:\n host = socket.gethostbyaddr(i[1].psrc)[0]\n except Exception:\n host = \"??\"\n machines.append({\"ip\": ip, \"mac\": mac, \"host\": host})\n\n return machines",
"def __init__(self, regexp, handler, quick=0):\n\n self.regexp = compile(regexp)\n self.handler = handler\n self.quick = quick",
"def vlan_iface_regex_doctests():\n pass",
"def __call__(self, parser, namespace, values, option_string=None):\n ip_split = values.split(\",\")\n [ip_address(ip) for ip in ip_split]\n setattr(namespace, self.dest, ip_split)"
] | [
"0.6047332",
"0.5977841",
"0.58359456",
"0.55522436",
"0.549954",
"0.54692423",
"0.5399935",
"0.53717846",
"0.5354695",
"0.5282927",
"0.52803683",
"0.5258503",
"0.5195545",
"0.51795524",
"0.51735955",
"0.5168112",
"0.5160153",
"0.5160032",
"0.5127986",
"0.5110216",
"0.51080203",
"0.51073694",
"0.5069773",
"0.50249",
"0.49947947",
"0.49710232",
"0.49662167",
"0.4961179",
"0.49496582",
"0.4925836"
] | 0.67209846 | 0 |