Dataset schema (column, feature type, observed size range):

    query              string      lengths 9 to 9.05k
    document           string      lengths 10 to 222k
    metadata           dict
    negatives          sequence    length 30 (fixed)
    negative_scores    sequence    length 30 (fixed)
    document_score     string      lengths 4 to 10
    document_rank      string      2 distinct values
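For context, a split with this schema can be loaded and inspected with the Hugging Face `datasets` library. The snippet below is a minimal sketch: the dataset identifier is a placeholder (the source does not name one), and a local JSON or Parquet dump with the same columns can be loaded the same way via `data_files` instead. The rows that follow are shown in schema order: query, document, metadata, negatives, negative_scores, document_score, document_rank.

from datasets import load_dataset

# "example-org/code-retrieval-triplets" is a placeholder identifier, not the real dataset name.
ds = load_dataset("example-org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])                # natural-language description of the target code
print(row["document"])             # positive code snippet
print(len(row["negatives"]))       # 30 hard-negative snippets
print(row["negative_scores"][:3])  # retrieval scores of the first few negatives
print(row["document_score"], row["document_rank"])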
Computes the elementwise minimum of some matrices.
def sim_min(sim_mats): return np.array(sim_mats).min(axis=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def matrix_min(data):\n if is_SparseDataFrame(data):\n data = [np.min(data[col]) for col in data.columns]\n elif is_sparse_dataframe(data):\n data = [sparse_series_min(data[col]) for col in data.columns]\n elif isinstance(data, pd.DataFrame):\n data = np.min(data)\n elif isinstance(data, sparse.lil_matrix):\n data = [np.min(d) for d in data.data] + [0]\n elif isinstance(data, sparse.dok_matrix):\n data = list(data.values()) + [0]\n elif isinstance(data, sparse.dia_matrix):\n data = [np.min(data.data), 0]\n return np.min(data)", "def l1Min(A, b):\n #set up the matrices\n m,n = A.shape\n solvers.options['show_progress'] = False\n c = np.concatenate([np.ones(n),np.zeros(n)]).astype(float)\n G = np.vstack((np.hstack((-np.eye(n),np.eye(n))), np.hstack((-np.eye(n),-np.eye(n))),np.hstack((-np.eye(n),np.zeros((n,n))))))\n h = np.zeros(3*n).astype(float)\n A = np.hstack((np.zeros((m,n)),A)).astype(float)\n #convert the matrices\n c = matrix(c)\n G = matrix(G)\n h = matrix(h)\n A = matrix(A)\n b = matrix(b.astype(float))\n #solve the matrices\n sol = solvers.lp(c, G, h,A,b)\n\n return np.ravel(sol['x'][n:]),sol['primal objective']", "def min(*x, na_rm: bool = False) -> Any:\n fun = numpy.nanmin if na_rm else numpy.min\n x = Collection(*x) # flatten\n return fun(x)", "def compare_min(values, weights):\n return np.min(values.numpy())", "def min(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n return _numeric_combine(x, np.min, reduce_instance_dims, name)", "def scalar_min(self, dst, src0, src1):\n return self._scalar_binary_func('min', dst, src0, src1)", "def minimum(lhs, rhs):\n return _make.minimum(lhs, rhs)", "def MIN(*args):\n return _group_function(min, *args)", "def minimum(x,y,z):\r\n\treturn min(min(x,y),z)", "def minij(n):\n o = np.outer(np.ones(n), np.arange(1, n + 1))\n ot = o.T\n a = np.where(o < ot, o, ot)\n return a", "def find_min(self, A, w):\n import numpy as np\n\n vcost = self.INFINITY\n vto = vfrom = -1\n for v in w:\n # Get array offset of minimum of this vertex\n i = np.argmin(A[v,:])\n if A[v,i] < vcost:\n vcost = A[v,i]\n vto = i\n vfrom = v\n return (vfrom, vto, vcost)", "def d_min(x, y):\n axis = np.argmax(x.shape)\n return np.min(np.array([x, y]), axis=axis)", "def pmin(\n *x: NumericType,\n na_rm: bool = False\n) -> Iterable[float]:\n maxlen = max(map(length_of, x))\n x = (recycle_value(elem, maxlen) for elem in x)\n return Array([min(elem, na_rm=na_rm) for elem in zip(*x)])", "def argmin(a, *args, **kwargs):\n warn('The function argmin is deprecated from JAMS. 
Use module pyjams.',\n category=DeprecationWarning)\n if isinstance(a, np.ma.MaskedArray):\n return np.ma.argmin(a, *args, **kwargs)\n elif isinstance(a, np.ndarray):\n return np.argmin(a, *args, **kwargs)\n else:\n return _argmin(a)", "def minimum(image, selem, out=None, mask=None, shift_x=False, shift_y=False):\n\n return _apply(_crank8.minimum, _crank16.minimum, image, selem, out=out,\n mask=mask, shift_x=shift_x, shift_y=shift_y)", "def nanmin(a, axis=None):\n y = array(a,subok=True)\n if not issubclass(y.dtype.type, _nx.integer):\n y[isnan(a)] = _nx.inf\n return y.min(axis)", "def argmin(self, values):\n return self.aggregate(values, \"argmin\")", "def mini(a, b):\n return min(a, b)", "def find_fmin_on_grid(f, xs, args, full_output):\n Nx = len(xs)\n Jout = np.zeros(Nx)\n for k in range(Nx):\n Jout[k] = f(xs[k], *args)\n idx = np.nanargmin(Jout)\n if not full_output:\n return xs[idx], Jout[idx]\n return xs[idx], Jout[idx], xs, Jout", "def arrmin(a):\n # could set arrmin = amin in scipy if scipy is installed\n try:\n return min(a.flat)\n except AttributeError:\n # not a NumPy array\n if isinstance(a, collections.Sequence):\n return min(a)\n elif isinstance(a, numbers.Number):\n return a\n else:\n raise TypeError('arrmin of %s not supported' % type(a))", "def smallest(*args):\r\n if len(args) == 2:\r\n a, b = args\r\n return switch(a < b, a, b)\r\n else:\r\n return min(stack(*args), axis=0)", "def argmin(x1, axis=None, out=None):\n\n x1_desc = dpnp.get_dpnp_descriptor(x1, copy_when_nondefault_queue=False)\n if x1_desc:\n if axis is not None:\n pass\n elif out is not None:\n pass\n else:\n result_obj = dpnp_argmin(x1_desc).get_pyobj()\n result = dpnp.convert_single_elem_array_to_scalar(result_obj)\n\n return result\n\n return call_origin(numpy.argmin, x1, axis, out)", "def atmin(a,lowerlimit=None,dimension=None,inclusive=1):\r\n if inclusive: lowerfcn = N.greater\r\n else: lowerfcn = N.greater_equal\r\n if dimension == None:\r\n a = N.ravel(a)\r\n dimension = 0\r\n if lowerlimit == None:\r\n lowerlimit = N.minimum.reduce(N.ravel(a))-11\r\n biggest = N.maximum.reduce(N.ravel(a))\r\n ta = N.where(lowerfcn(a,lowerlimit),a,biggest)\r\n return N.minimum.reduce(ta,dimension)", "def minimum_inplace(a, b):", "def nanmin(self, axis=0, **kwargs) -> \"Dataset\":\n return self.aggregate(axis=axis, func=np.nanmin, **kwargs)", "def minimum(self):\n \n omega_star = fmin(self.function, 0, disp=False)[0]\n loss = self.function(omega_star)\n return omega_star, loss", "def Vmin(V):\n return np.min(V)", "def minimum(x, y):\r\n # see decorator for function body\r", "def nanmin_nb(a):\n out = np.empty(a.shape[1], dtype=np.float_)\n for col in range(a.shape[1]):\n out[col] = np.nanmin(a[:, col])\n return out", "def structured_minimum(x, y):\r\n # see decorator for function body\r" ]
[ "0.6745464", "0.6651574", "0.6647899", "0.64386827", "0.6284703", "0.6036429", "0.6033137", "0.5970848", "0.5958427", "0.5948367", "0.5947503", "0.59440386", "0.59381294", "0.5921218", "0.59118783", "0.5897402", "0.5858605", "0.5849506", "0.58406216", "0.5815177", "0.5813334", "0.580042", "0.5780769", "0.5776347", "0.5771414", "0.5746883", "0.57168865", "0.570722", "0.5706416", "0.57015264" ]
0.7239349
0
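A quick illustration of what the positive document in the row above computes, as a sketch assuming `np` is NumPy and the input matrices share a shape:

import numpy as np

def sim_min(sim_mats):
    return np.array(sim_mats).min(axis=0)

a = np.array([[1, 5], [3, 2]])
b = np.array([[4, 0], [2, 6]])
print(sim_min([a, b]))  # [[1 0]
                        #  [2 2]]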
Computes the elementwise maximum of some matrices.
def sim_max(sim_mats): return np.array(sim_mats).max(axis=0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def three_array_max(array_list: List[np.ndarray]) -> np.ndarray:\n temp = np.maximum(array_list[0], array_list[1])\n all_maxs = np.maximum(temp, array_list[2])\n\n return all_maxs", "def d_max(x, y):\n axis = np.argmax(x.shape)\n return np.max(np.array([x, y]), axis=axis)", "def max(x, reduce_instance_dims=True, name=None): # pylint: disable=redefined-builtin\n return _numeric_combine(x, np.max, reduce_instance_dims, name)", "def maximum(x):\n return np.maximum(x, 0)", "def compare_max(values, weights):\n return np.max(values.numpy())", "def max(*x, na_rm: bool = False) -> Any:\n fun = numpy.nanmax if na_rm else numpy.max\n x = Collection(*x) # flatten\n return fun(x)", "def max_decode(M):\r\n return scipy.array([ f.val.argmax() for f in M])", "def find_max_product(mtx):\n max_prod = 0\n for row_num in range(20):\n vert = 0\n diag = 0\n anti_diag = 0\n horiz = horiz_max(mtx[row_num])\n if row_num < len(mtx) - 3:\n vert = vert_max(mtx[row_num], mtx[row_num + 1],\n mtx[row_num + 2], mtx[row_num + 3])\n diag = diag_max(mtx[row_num], mtx[row_num + 1],\n mtx[row_num + 2], mtx[row_num + 3])\n anti_diag = anti_diag_max(mtx[row_num], mtx[row_num + 1],\n mtx[row_num + 2], mtx[row_num + 3])\n max_prod = max(max_prod, horiz, vert, diag, anti_diag)\n return max_prod", "def max(x, axis=None, keepdims=False):\r\n\r\n # We have a choice of implementing this call with the\r\n # CAReduce op or the MaxAndArgmax op.\r\n\r\n # MaxAndArgmax supports grad and Rop, so we prefer to use that.\r\n # CAReduce is faster, but optimizations will replace MaxAndArgmax[0]\r\n # with CAReduce at compile time, so at this stage the important\r\n # thing is supporting all user interface features, not speed.\r\n # Some cases can be implemented only with CAReduce.\r\n\r\n # We thus prefer to use MaxAndArgmax, if possible. 
It does not\r\n # support all axis arguments, so we may need to fall back to CAReduce.\r\n\r\n try:\r\n out = max_and_argmax(x, axis)[0]\r\n except Exception:\r\n out = CAReduce(scal.maximum, axis)(x)\r\n\r\n if keepdims:\r\n out = makeKeepDims(x, out, axis)\r\n return out", "def maximum(image, selem, out=None, mask=None, shift_x=False, shift_y=False):\n\n return _apply(_crank8.maximum, _crank16.maximum, image, selem, out=out,\n mask=mask, shift_x=shift_x, shift_y=shift_y)", "def compute_largest_eigenvalue(A, num_simulations):\n b_k = power_iteration(A, num_simulations)\n return b_k.dot(A).dot(b_k) / (b_k.dot(b_k))", "def find_matrix_max(matrix):\n\n max_val = 0.0\n max_i = 0\n max_j = 0\n\n for i in matrix.keys():\n try:\n kvp = max(matrix[i].iteritems(), key=itemgetter(1))\n except ValueError:\n continue\n \n # Maybe I should store the max value with the array, and then always \n # store the previous largest, and when i insert or delete...\n \n if kvp[1] > max_val:\n max_val = kvp[1]\n max_i = i\n max_j = kvp[0]\n\n return (max_i, max_j, max_val)", "def maximum(lhs, rhs):\n return _make.maximum(lhs, rhs)", "def argmax(module, x, axes=None):\n return module.argmax(x, axes)", "def max_dim(elements, coordinates):\n atom_vdw_vertical = np.matrix(\n [[atomic_vdw_radius[i.upper()]] for i in elements])\n atom_vdw_horizontal = np.matrix(\n [atomic_vdw_radius[i.upper()] for i in elements])\n dist_matrix = euclidean_distances(coordinates, coordinates)\n vdw_matrix = atom_vdw_vertical + atom_vdw_horizontal\n re_dist_matrix = dist_matrix + vdw_matrix\n final_matrix = np.triu(re_dist_matrix)\n i1, i2 = np.unravel_index(final_matrix.argmax(), final_matrix.shape)\n maxdim = final_matrix[i1, i2]\n return i1, i2, maxdim", "def structured_maximum(x, y):\r\n # see decorator for function body\r", "def strict_max(x, minicolumns):\n\n x = np.reshape(x, (x.size // minicolumns, minicolumns))\n z = np.zeros_like(x)\n maxes = np.argmax(x, axis=1)\n for max_index, max_aux in enumerate(maxes):\n z[max_index, max_aux] = 1\n\n return z.reshape(x.size)", "def pmax(\n *x: Iterable,\n na_rm: bool = False\n) -> Iterable[float]:\n maxlen = max(map(length_of, x))\n x = (recycle_value(elem, maxlen) for elem in x)\n return Array([max(elem, na_rm=na_rm) for elem in zip(*x)])", "def relu(arr):\n return column_bind(zeros(len(arr)), arr).max(axis=1)", "def mrv_max1(f, g, exps, x):\n u, b = f.union(g, exps)\n return mrv_max3(f, g.do_subs(exps), g, f.do_subs(exps),\n u, b, x)", "def MaxMatrix(m):\n max = 0\n index = [0,1]\n for i in m:\n for j in i:\n if j > max:\n max = j\n index = [m.index(i),i.index(j)]\n return index", "def max(x, y):\n x[:] = np.maximum(x[:], y[:])\n return x", "def scalar_max(self, dst, src0, src1):\n return self._scalar_binary_func('max', dst, src0, src1)", "def argmax(a, *args, **kwargs):\n warn('The function argmax is deprecated from JAMS. 
Use module pyjams.',\n category=DeprecationWarning)\n if isinstance(a, np.ma.MaskedArray):\n return np.ma.argmax(a, *args, **kwargs)\n elif isinstance(a, np.ndarray):\n return np.argmax(a, *args, **kwargs)\n else:\n return _argmax(a)", "def max_and_argmax(a, axis=None, keepdims=False):\r\n\r\n out, argout = _max_and_argmax(a, axis)\r\n\r\n if keepdims:\r\n out = makeKeepDims(a, out, axis)\r\n argout = makeKeepDims(a, argout, axis)\r\n return [out, argout]", "def compute_maximisation( self, X, Z, O ):\n\n raise NotImplementedError", "def arglexmax(keys, multi=False):\n # Handle keys in reverse order to be consistent with np.lexsort\n reverse_keys = keys[::-1]\n arr = reverse_keys[0]\n breakers = reverse_keys[1:]\n # Look for the maximum value in the first array, and continue using new\n # arrays until a unique maximum index is found.\n _cand_idxs = np.where(arr == arr.max())[0]\n if len(_cand_idxs) > 1:\n for breaker in breakers:\n vals = breaker[_cand_idxs]\n _cand_idxs = _cand_idxs[vals == vals.max()]\n if len(_cand_idxs) == 1:\n break\n # If multiple maximum values are found then either\n # return them all or return an arbitrary one.\n return _cand_idxs if multi else _cand_idxs[0]", "def max_op(*substrate_index_arrays):\n result = numpy.max(\n numpy.stack([x.flatten() for x in substrate_index_arrays]) *\n self.species_substrate_suitability_index_array, axis=0)\n result = result.reshape(substrate_index_arrays[0].shape)\n result[substrate_index_arrays[0] == _INDEX_NODATA] = _INDEX_NODATA\n return result", "def maximum_inplace(a, b):", "def maxi(a, b):\n return max(a, b)" ]
[ "0.6566944", "0.6431522", "0.63984", "0.6369676", "0.6349694", "0.6326043", "0.6279134", "0.6229197", "0.6208424", "0.61817044", "0.61411893", "0.60957426", "0.60940033", "0.6092088", "0.60329014", "0.6031537", "0.60274714", "0.6022276", "0.60205376", "0.6020099", "0.5998723", "0.59805614", "0.5967194", "0.59653574", "0.59646475", "0.5962682", "0.5953714", "0.5928523", "0.59127104", "0.589588" ]
0.69111717
0
Takes in an Image message and identifies the locations of the three dumbbells
def identify_dbs(image):
    locations = {"red": Point(), "green": Point(), "blue": Point()}
    masks = {"red": [], "green": [], "blue": []}
    bridge = cv_bridge.CvBridge()
    image = bridge.imgmsg_to_cv2(image, "bgr8")
    hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    # upper and lower bounds for red
    # using python 3 bgr [0,0,188] = hsv [0, 255, 188]
    lower_red = numpy.array([0, 100, 100])
    upper_red = numpy.array([10, 255, 255])
    masks["red"] = cv2.inRange(hsv, lower_red, upper_red)
    # upper and lower bounds for green
    # using python 3 bgr [0,175,0] = hsv [60, 255, 175]
    lower_green = numpy.array([50, 100, 100])
    upper_green = numpy.array([70, 255, 255])
    masks["green"] = cv2.inRange(hsv, lower_green, upper_green)
    # upper and lower bounds for blue
    # using python 3 bgr [176, 0, 17] = hsv [123, 255, 176]
    lower_blue = numpy.array([113, 100, 100])
    upper_blue = numpy.array([133, 255, 255])
    masks["blue"] = cv2.inRange(hsv, lower_blue, upper_blue)
    x, y, w, h = 0, 0, image.shape[1]//3, image.shape[0]
    for color, mask in masks.items():
        pixels = {"left": 0, "middle": 0, "right": 0}
        # define section of image to use for left, middle and right
        left = mask[y:y+h, x:x+w]
        middle = mask[y:y+h, x+w:x+w+w]
        right = mask[y:y+h, x+w+w:x+3*w]
        # count the number of pixels in each section
        pixels["left"] = cv2.countNonZero(left)
        pixels["middle"] = cv2.countNonZero(middle)
        pixels["right"] = cv2.countNonZero(right)
        location = max(pixels, key=pixels.get)
        # map the relative position of the db (left, middle, right) to the correct Point()
        locations[color] = db_locations[location]
    return locations
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def belt(image):\n\n # Belt Detector\n x, y = circular_detector(image, 70, 80)\n\n return x, y", "def describe(image):\n needle = cv2.imread(image, 0)\n orb = cv2.ORB()\n keypoints, description = orb.detectAndCompute(needle, None)\n print(keypoints)\n print(description)\n return keypoints, description", "def identify_blocks(images):\n locations = {1: Point(), 2: Point(), 3: Point()}\n blocks = {\"left\": 0, \"middle\": 0, \"right\": 0}\n pipeline = keras_ocr.pipeline.Pipeline()\n cv2_images = []\n\n for image in images:\n bridge = cv_bridge.CvBridge()\n cv2_images.append(bridge.imgmsg_to_cv2(image, \"bgr8\"))\n\n predictions = pipeline.recognize(cv2_images)\n rospy.loginfo(predictions)\n\n blocks[\"left\"] = int(predictions[0][0][0])\n blocks[\"middle\"] = int(predictions[1][0][0])\n blocks[\"right\"] = int(predictions[2][0][0])\n\n for position, block in blocks.items():\n locations[block] = block_locations[position]\n \n return locations", "def _get_image_location(self):\n\t\timagePointer = self.labels['^IMAGE'].split()\n\t\tif len(imagePointer) == 1:\n\t\t\trecordBytes = int(self.labels['RECORD_BYTES'])\n\t\t\timageLocation = (int(imagePointer[0]) - 1) * recordBytes\n\t\telif len(imagePointer) == 2:\n\t\t\tunits = imagePointer[1]\n\t\t\tif not units == '<BYTES>':\n\t\t\t\terrorMessage = (\"Expected <BYTES> image pointer units but found %s\") % (units)\n\t\t\t\traise ValueError, (errorMessage)\n\t\t\telse:\n\t\t\t\timageLocation = int(imagePointer[0])\n\t\telse:\n\t\t\terrorMessage = (\"^IMAGE contains extra information\") % (imageSampleType)\n\t\t\traise ValueError(errorMessage)\n\t\treturn imageLocation", "def identify_image(im):\n score_cures = np.mean(im[1025:1065, 1130:1180, 0])\n score_ingredients = np.mean(im[1025:1065, 675:720, 0])\n if score_cures < 177.5:\n return 'cures'\n if score_ingredients < 177.5:\n return 'ingredients'\n else:\n return 'other'", "def _get_positions(self, image):\n\t\tH, W, _ = image.shape\n\t\tpos_list = self.apply_detection(image)\n\t\tdetections = {}\n\t\thasDetection = False\n\t\tfor i, L in enumerate(pos_list):\n\t\t\ttext, coordinates = L[0], L[1]\n\t\t\tfor x, y, w, h in coordinates:\n\t\t\t\tif x < 0 or y < 0 or x + w > W or \\\n\t\t\t\t y + h > H or w <= 1 or h <= 1:\n\t\t\t\t\tcontinue\n\t\t\t\t# add the detection to the dict for tracking\n\t\t\t\tif text == 'face' or text == 'super woman':\n\t\t\t\t\tself.detection_index[self.num_detect] = (x, y, w, h, self.num_save, text, -1)\n\t\t\t\telse:\n\t\t\t\t\tself.detection_index[self.num_detect] = (x, y, w, h, self.num_save, text, -2)\n\t\t\t\tdetections[self.num_detect] = (x, y, w, h)\n\t\t\t\tself.num_detect += 1\n\t\t\t\thasDetection = True\n\t\tif hasDetection:\n\t\t\tself.detection_frames[self.num_save] = detections\n\t\tself.num_save +=1", "def face_coords(img, model):\n (h, w) = img.shape[:2]\n blob = cv2.dnn.blobFromImage(cv2.resize(img, (300, 300)), 1.0, (300, 300), (104.0, 177.0, 123.0))\n\n model.setInput(blob)\n detections = model.forward()\n\n box = detections[0, 0, 0, 3:7] * np.array([w, h, w, h])\n (startX, startY, endX, endY) = box.astype(\"int\")\n\n return (startX, startY, endX, endY)", "def process_image(self, msg):\n self.cv_image = self.bridge.imgmsg_to_cv2(msg, desired_encoding=\"bgr8\")\n self.edge_detected = cv2.Canny(self.cv_image,self.minVal,self.maxVal)\n if cv2.__version__.startswith('3.'):\n _, self.contours,_ = cv2.findContours(self.edge_detected, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n else:\n self.contours,_ = cv2.findContours(self.edge_detected, 
cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n self.contour_image = cv2.drawContours(self.cv_image, self.contours, -1, (0,255,0), 3)\n for i in range(len(self.contours)):\n temp = self.dp(self.contours[i], 20)\n self.res.append(len(temp))\n if len(temp) == 7:\n for i in range(0,len(temp)-1,2):\n cv2.line(self.contour_image, (temp[i][0],temp[i][1]),(temp[i+1][0], temp[i+1][1]), (0,0,255), 5)\n if len(temp) == 5:\n for i in range(0,len(temp)-1,2):\n cv2.line(self.contour_image, (temp[i][0],temp[i][1]),(temp[i+1][0], temp[i+1][1]), (255,0,0), 5)", "def image(self, state):\n return state['positions']", "def debug_3_locations( img, gt_location, yolo_location, rolo_location):\n img_cp = img.copy()\n for i in range(3): # b-g-r channels\n if i== 0: location= gt_location; color= (0, 0, 255) # red for gt\n elif i ==1: location= yolo_location; color= (255, 0, 0) # blue for yolo\n elif i ==2: location= rolo_location; color= (0, 255, 0) # green for rolo\n x = max(int(location[0]), 0)\n y = max(int(location[1]), 0)\n w = max(int(location[2]), 0)\n h = max(int(location[3]), 0)\n # if i== 2: cv2.rectangle(img_cp,(x-w//2, y-h//2),(x+w//2,y+h//2), color, 2)\n # elif i== 0 or i == 1: cv2.rectangle(img_cp,(x,y),(x+w,y+h), color, 2)\n cv2.rectangle(img_cp,(x-w/2,y-h/2),(x+w/2,y+h/2), color, 2)\n # cv2.imshow('3 locations',img_cp)\n # cv2.waitKey(100)\n return img_cp", "def M12Nut(image):\n kernel = np.ones((5, 5), np.uint8)\n image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel, iterations=4)\n\n parameters = cv2.SimpleBlobDetector_Params()\n detector = cv2.SimpleBlobDetector_create(parameters=parameters)\n keypoints = detector.detect(image)\n new_image = cv2.drawKeypoints(image, keypoints, np.array([]), (0, 0, 255), cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)\n for i in range(len(keypoints)):\n print(\"Keypoint: \", keypoints[i].pt)\n cv2.imshow(\"Keypoints\", new_image)\n cv2.waitKey(1000)\n cv2.destroyAllWindows()\n x, y = keypoints[0].pt\n\n return x, y", "def particle_images (sim,frame_id) :\n # get positions of all particles: define first the atom selection, then jump to\n # the user-requested trajectory frame, get the box dimensions (currently works\n # only for orthorhombic boxes, then calculate the image indices\n atoms = sim.u.select_atoms ('all')\n ts = sim.u.trajectory[frame_id]\n L = ts.dimensions[:3]\n pos = atoms.positions + L/2.\n return pos//L", "def get_symbol_images_and_positions(im):\n gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\n _, threshold = cv2.threshold(gray, 100, 255, 0)\n threshold = 255 - threshold\n # show(threshold)\n contours, hierarchy = cv2.findContours(threshold, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)\n contours = [cv2.approxPolyDP(contour, 8, True) for contour in contours]\n contours = [c for c in contours if c.shape[0] == 4 and cv2.isContourConvex(c)]\n contours = sorted(contours, key=cv2.contourArea)\n contour = contours[-1]\n\n offset_x, offset_y, _, _ = cv2.boundingRect(contour)\n symbols_im = trim_to_contour_bounding_box(im, contour)\n half_height = symbols_im.shape[0] / 2\n half_width = symbols_im.shape[1] / 2\n symbols = (\n symbols_im[:half_height, :half_width],\n symbols_im[:half_height, half_width:],\n symbols_im[half_height:, :half_width],\n symbols_im[half_height:, half_width:],\n\n )\n symbols = (_process_button_im(symbol_im) for symbol_im in symbols)\n\n positions = (\n (offset_x + half_width / 2, offset_y + half_height / 2),\n (offset_x + half_width * 3 / 2, offset_y + half_height / 2),\n (offset_x + half_width / 2, offset_y + half_height * 3 / 2),\n 
(offset_x + half_width * 3 / 2, offset_y + half_height * 3 / 2),\n )\n\n return symbols, positions", "def bbxes_data(img):\n _, _, stats, centr = cv2.connectedComponentsWithStats(img)\n # (xCentr, yCentr, area, width, height, xStart, xEnd , yStart, yEnd )\n return sorted([(cent[0], cent[1], stat[4], stat[2], stat[3], stat[0], stat[0] + stat[2], stat[1], stat[1] + stat[3])\n for cent, stat in zip(centr[1:], stats[1:])])", "def detect_face(self, img):\n # Fetch face location from the frame with 128 encoding of face landmarks\n curr_face_loc, name_list, info_list = load_encode_loc(img, self.kwn_names,\n self.kwn_encoding,\n self.status_list, self.since_list)\n print('Current value is ', curr_face_loc, name_list)\n face_list = []\n face_area = []\n print('face loc', curr_face_loc)\n if len(curr_face_loc):\n\n for (top, right, bottom, left), name in zip(curr_face_loc, name_list):\n print(top, right, bottom, left)\n cv2.rectangle(img, (top, right), (bottom, left), (0, 255, 2), 2)\n\n w = right - left\n h = bottom - top\n cx = left + w // 2\n cy = top + h // 2\n area = w * h\n\n for idx, info in enumerate(info_list):\n cv2.putText(img, info, (bottom, int(left * idx * 0.2)),\n cv2.FONT_HERSHEY_COMPLEX, 1,\n (0, 0, 255), 1)\n\n face_list.append([cx, cy])\n face_area.append(area)\n\n i = face_area.index(max(face_area))\n\n return img, [face_list[i], face_area[i]]\n\n else:\n return img, [[0, 0], 0]", "def process_image(im):\r\n h, _, _ = im.shape\r\n im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY)\r\n \r\n # Divide the picture into 3 regions\r\n l1 = int(0.65*h)\r\n l2 = int(0.77*h)\r\n im1 = im_gray[:l1,:]\r\n im2 = im_gray[l1+1:l2,:]\r\n im3 = im_gray[l2+1:,:]\r\n \r\n # Extract 4 pictures\r\n pics = extract_4_pics(im, im1)\r\n \r\n # Extract the word size\r\n word_size = extract_word_size(im2)\r\n \r\n # Extract the letters\r\n letters = extract_letters(im3)\r\n \r\n print 'word size =', word_size\r\n print 'letters =', letters\r\n for i, pic in enumerate(pics):\r\n imsave(str(i) + '.png', pic)\r\n\r\n return word_size, letters, pics", "def get_image_characteristics(self):\r\n self.image_height, self.image_width, self.image_channels = self.image.shape\r\n\r\n # Estimate the cell size to be around a ninth of the width of the screenshot area\r\n self.cell_size = int(self.image_width / 9) | 1\r\n\r\n # Cell size should be at most a ninth of the width and at least a twentieth of the width of the screenshot\r\n # Since a typical grid is 9x9, so it should be at most a ninth of the image width, and it shouldn't be too small\r\n self.min_cell_size = int(self.image_width / 20 * self.image_width / 20)\r\n self.max_cell_size = int(self.image_width / 9 * self.image_width / 9)", "def depth_range_from_img(img, inspect):\n x0, y0, x1, y1 = TEXT_BBOX\n crop_fn = lambda x: x[x0:x1,y0:y1]\n\n if isinstance(img, str):\n img = io.imread(img)\n\n # Get string of text, look for possible floats\n chars = pytesseract.image_to_string(crop_fn(img), config=TESSERACT_CONFIG)\n numbers = [truncate(number,2) for number in re.findall('\\d+\\.\\d+', chars)]\n\n if len(numbers) < 2:\n if inspect:\n print('Less than two floats found, filling in 0\\'s')\n print(chars)\n plt.imshow(crop_fn(img))\n plt.show()\n return (0.0, 0.0)\n else:\n # Return last two possible floats\n return (float(numbers[-2]), float(numbers[-1]))", "def get_element_coordinates(path_to_image):\n return pyautogui.center(pyautogui.locateOnScreen(path_to_image, confidence=0.9))", "def test():\n\n 
fname='./MedData/Lung-PET-CT-Dx/Lung_Dx-A0164/04-12-2010-PET01PTheadlung Adult-08984/8.000000-Thorax 1.0 B31f-52757/1-001.dcm' \n \n ds=pydicom.dcmread(fname)\n # print(ds.pixel_array.shape)\n print(ds.pixel_array[1])\n plt.figure(figsize=(10,10))\n plt.imshow(ds.pixel_array, cmap=plt.cm.bone)\n plt.show()", "def image_handler(self, bot, update):\n text = update.message.text\n if text.startswith('/recon'):\n bot.sendMessage(chat_id=update.message.chat_id, text='*Object recognition*\\nSend me an image',\n parse_mode=ParseMode.MARKDOWN)\n return 10\n elif text.startswith('/faces'):\n bot.sendMessage(chat_id=update.message.chat_id, text='*Face recognition*\\nSend me an image',\n parse_mode=ParseMode.MARKDOWN)\n return 11", "def message_received(self, message):\n\n body_lxml = message.body_lxml()\n for image in body_lxml.iterfind(\".//img[@src]\"):\n if image.attrib['src'] == self.no_devil_banana_url:\n logger.debug(\":nodb: found in {0}'s message {1}\".format(\n message.user_name, message.id\n ))\n with self.last_lock:\n self.last_nodb_message = message.id\n elif image.attrib['src'] in self.devil_banana_urls:\n if message.user_name == self.connector.username:\n # ignore my own devil banana messages\n return\n logger.debug(\"devil banana {2} found in {0}'s message {1}\".format(\n message.user_name, message.id, image.attrib['src']\n ))\n with self.last_lock:\n self.last_banana_message_due_to_edit = False\n self.last_banana_message = message.id", "def paintings_detection(query_image, mask):\n\n image = cv2.imread(query_image)\n\n image_width = mask.shape[0]\n image_height = mask.shape[1]\n x_box_1, y_box_1, w_box_1, h_box_1, x_box_2, y_box_2, w_box_2, h_box_2 = 0, 0, 0, 0, 0, 0, 0, 0, \n\n contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2:]\n\n for cnt in contours:\n x, y, w, h = cv2.boundingRect(cnt)\n \n if (w > 0.15 * image_width) & (h > 0.15 * image_height) & (w < 0.98 * image_width) & (x_box_1 == 0):\n x_box_1, y_box_1, w_box_1, h_box_1 = x, y, w, h\n elif (w > 0.15 * image_width) & (h > 0.15 * image_height) & (w < 0.98 * image_width) & (x_box_1 != 0):\n x_box_2, y_box_2, w_box_2, h_box_2 = x, y, w, h\n\n if x_box_2 == 0:\n x_value_to_split = 0\n else:\n x_value_to_split = (x_box_1 + w_box_1/2 + x_box_2 + w_box_2/2) / 2\n\n\n return(x_value_to_split)", "def cover(img: Image, message: bitarray) -> Image:\n width, height = img.size\n check_image_width(width, RGB_PIXEL, message)\n pixels = img.load()\n\n row = random.randint(0, height) # Randomly chooses row.\n i = 0 # Tracks hidden bits\n\n # If Image consist of 8-bit pixels\n if img.mode == \"P\":\n offset = generate_offset(width, P_PIXEL, message)\n for x in range(offset, width):\n p = pixels[x,row]\n if i < len(message):\n p = modify_byte(p, message[i])\n i += 1\n pixels[x,row] = p\n generate_key(row, message, offset * P_PIXEL)\n # If Image consists of 3x8-bit pixels\n elif img.mode == \"RGB\":\n offset = generate_offset(width, RGB_PIXEL, message)\n for x in range(offset, width):\n r, g, b = pixels[x,row]\n if i < len(message):\n r = modify_byte(r, message[i])\n i += 1\n if i < len(message):\n g = modify_byte(g, message[i])\n i += 1\n if i < len(message):\n b = modify_byte(b, message[i])\n i += 1\n pixels[x,row] = (r, g, b)\n generate_key(row, message, offset * RGB_PIXEL)\n # If Image consists of 4x8-bits pixels\n elif img.mode == \"RGBA\":\n offset = generate_offset(width, RGBA_PIXEL, message)\n for x in range(offset, width):\n r, g, b, a = pixels[x,row]\n if i < len(message):\n r = modify_byte(r, 
message[i])\n i += 1\n if i < len(message):\n g = modify_byte(g, message[i])\n i += 1\n if i < len(message):\n b = modify_byte(b, message[i])\n i += 1\n if i < len(message):\n a = modify_byte(a, message[i])\n i += 1\n pixels[x,row] = (r, g, b, a)\n generate_key(row, message, offset * RGBA_PIXEL)\n\n return img", "def dimension_finder(self, text_header):\r\n dimension = re.search('\\d+x\\d+',text_header).group()\r\n dimension1 = re.findall('\\d+',dimension)[0]\r\n dimension1 = float(dimension1)\r\n dimension2 = re.findall('\\d+',dimension)[1]\r\n dimension2 = float(dimension2)\r\n return (dimension1,dimension2)", "def checkImages(self):\r\n\r\n self.leftImage, self.rightImage, res = self.receiver.getImageData()\r\n\r\n return res", "def draw_boxes_info(image, current_data):\n\n font_position1 = (50, 600)\n font_position2 = (50, 650)\n font_scale = .4\n font_thickness = 1\n\n locations = current_data[\"locations\"] #returns x1, y1, x2, y2\n frame_num = \"Frame Number: \" + str(current_data[\"frame_num\"])\n\n for box in locations:\n box_text = (\"Box locations are x1: {0}, y1: {1}, x2: {2}, y2: {3}\").format(box[1],box[3],box[0],box[2])\n\n cv2.rectangle(image, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 3)\n cv2.putText(image, box_text, font_position1, cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255, 255, 255),\n font_thickness, cv2.LINE_AA)\n\n cv2.putText(image, frame_num, font_position2, cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255, 255, 255),\n font_thickness, cv2.LINE_AA)\n\n return image", "def test_get_click_locations(images):\n #Show the click locations for all images\n for i, image in enumerate(images):\n stack_bounds = get_stack_bounds(image)\n clicks = get_click_locations(stack_bounds)\n\n #Draw the number of points and the points at the center of their stacks\n clicks_image = cv2.putText(np.copy(image),\n 'num_stacks: {}'.format(len(clicks)),\n (10, 30),\n cv2.FONT_HERSHEY_DUPLEX,\n 0.75, (0, 0, 0), 1, cv2.LINE_AA)\n for click in clicks.values():\n clicks_image = cv2.circle(clicks_image, click, 5, (0,0,0), -1)\n\n original_and_clicks = hstack_images(image, clicks_image, False)\n cv2.imshow('original and clicks_image {}'.format(i), original_and_clicks)\n cv2.waitKey(0)\n cv2.destroyAllWindows()", "def detect_face_api(self, img):\n\n curr_face_loc, name_list, info_list = load_encode_loc(img, self.kwn_names,\n self.kwn_encoding,\n self.status_list, self.since_list)\n print('Current value is ', curr_face_loc, name_list)\n face_list = []\n face_area = []\n print('face loc', curr_face_loc)\n if len(curr_face_loc):\n\n for (top, right, bottom, left), name in zip(curr_face_loc, name_list):\n print(top, right, bottom, left)\n cv2.rectangle(img, (top, right), (bottom, left), (0, 255, 2), 2)\n\n w = right - left\n h = bottom - top\n cx = left + w // 2\n cy = top + h // 2\n area = w * h\n\n for idx, info in enumerate(info_list):\n cv2.putText(img, info, (bottom, int(left * idx * 0.2)),\n cv2.FONT_HERSHEY_COMPLEX, 1,\n (0, 0, 255), 1)\n\n face_list.append([cx, cy])\n face_area.append(area)\n\n i = face_area.index(max(face_area))\n\n return img, [face_list[i], face_area[i]]\n\n else:\n return img, [[0, 0], 0]", "def process(self, image):" ]
[ "0.5904444", "0.58341223", "0.5803662", "0.5694714", "0.564857", "0.5638822", "0.5634845", "0.5602222", "0.55758303", "0.5494642", "0.5491051", "0.548308", "0.5441922", "0.5413018", "0.54116", "0.5403581", "0.5379999", "0.53695244", "0.53540814", "0.5338005", "0.53375745", "0.53375083", "0.53011036", "0.5300451", "0.52988744", "0.5293107", "0.5291946", "0.5271617", "0.5260808", "0.5254867" ]
0.6506251
0
Takes in a list of Image messages and identifies the block number in each image
def identify_blocks(images):
    locations = {1: Point(), 2: Point(), 3: Point()}
    blocks = {"left": 0, "middle": 0, "right": 0}
    pipeline = keras_ocr.pipeline.Pipeline()
    cv2_images = []
    for image in images:
        bridge = cv_bridge.CvBridge()
        cv2_images.append(bridge.imgmsg_to_cv2(image, "bgr8"))
    predictions = pipeline.recognize(cv2_images)
    rospy.loginfo(predictions)
    blocks["left"] = int(predictions[0][0][0])
    blocks["middle"] = int(predictions[1][0][0])
    blocks["right"] = int(predictions[2][0][0])
    for position, block in blocks.items():
        locations[block] = block_locations[position]
    return locations
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getBlocks(self):\n blocks = self.getBlocksMsg(b'\\x00')\n last_locator = self.largeMessageControl(blocks, 'inv', 0)\n\n while last_locator[1] < TARGET_BLOCK:\n blocks = self.getBlocksMsg(bytearray.fromhex(convertLittleBig(last_locator[0])))\n last_locator = self.largeMessageControl(blocks, 'inv', last_locator[1])\n\n print('\\nSuccessfully found the Block #{}: {}'.format(TARGET_BLOCK, last_locator[0]))\n return last_locator[0]", "def detectBlocksInDepthImage(self):\n depth_range_dict = {'1':[173,178],'2':[169,172],'3':[165,169],'4':[159,163],'5':[156,158],'6':[147,155],'7':[139,146],'8':[132,138]}\n depth_frame = self.DepthFrameRaw\n rgb_frame = self.VideoFrame\n rgb_frame = cv2.resize(rgb_frame, (640,480))\n depth_frame = cv2.resize(depth_frame, (640, 480))\n np.clip(depth_frame,0,2**10 - 1,depth_frame)\n depth_frame >>= 2\n depth_frame = depth_frame.astype(np.uint8)\n filt_block = []\n for k,v in depth_range_dict.items():\n thresh = cv2.inRange(depth_frame,v[0],v[1])\n cv2.imwrite(\"/home/student/armlab-w20/log/img.jpeg\", thresh)\n _ , contours, _ = cv2.findContours(thresh, 1, 2)\n for cnt in contours:\n area = cv2.contourArea(cnt)\n if area > 400 and area < 700:\n block = []\n rect = cv2.minAreaRect(cnt)\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n color = self.determine_color(rgb_frame, box)\n org = (box[0][0], box[0][1])\n rgb_frame = cv2.putText(rgb_frame, color, org,cv2.FONT_HERSHEY_SIMPLEX , 0.5 ,(0,0,0),2, cv2.LINE_AA)\n rgb_frame = cv2.drawContours(rgb_frame,[box],0,(0,0,0),0)\n self.VideoFrame = rgb_frame\n block.append(box)\n block.append(int(k))\n block.append(color)\n filt_block.append(block)\n return filt_block", "def getparts(image, block_len):\n img = image.convert('L') if image.mode != 'L' else image\n w, h = img.size \n parts = []\n # Bluring image for abandoning image details and noise.\n global opt\n for n in range(int(opt.imblev)):\n img = img.filter(ImageFilter.SMOOTH_MORE)\n # Converting image to custom palette\n imagetopalette(img, [x for x in range(256) if x%int(opt.impalred) == 0])\n pix = img.load()\n \n for x in range(w-block_len):\n for y in range(h-block_len):\n data = list(blockpoints(pix, (x,y), block_len)) + [(x,y)]\n parts.append(data)\n parts = sorted(parts)\n return parts", "def num_blocks(self): # -> int:\n ...", "def detectBlocksInDepthImage(self):\n self.depth_detect_frame = self.currentDepthFrame\n\n # 1 block\n self.bin_detect_frame_1 = cv2.inRange(self.depth_detect_frame,700,710)\n kernel = np.ones((6,6),np.uint8)\n self.bin_detect_frame_1 = cv2.erode(self.bin_detect_frame_1,kernel,iterations = 1)\n _, self.block_contours, _ = cv2.findContours(self.bin_detect_frame_1,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\n \n # 2 blocks\n self.bin_detect_frame_2 = cv2.inRange(self.depth_detect_frame,680,699)\n kernel = np.ones((6,6),np.uint8)\n self.bin_detect_frame_2 = cv2.erode(self.bin_detect_frame_2,kernel,iterations = 1)\n _, self.block_contours_2, _ = cv2.findContours(self.bin_detect_frame_2,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\n \n # 3 blocks\n self.bin_detect_frame_3 = cv2.inRange(self.depth_detect_frame,660,679)\n kernel = np.ones((6,6),np.uint8)\n self.bin_detect_frame_3 = cv2.erode(self.bin_detect_frame_3,kernel,iterations = 1)\n _, self.block_contours_3, _ = cv2.findContours(self.bin_detect_frame_3,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\n \n pass", "def get_initial_blocks(self):\n block = []\n index = 0\n for number in self.numbers_from_file(self.input_file_name):\n block.append(number)\n if len(block) == self.block_size:\n 
block.sort()\n self.write_block(index, block)\n block = []\n index += 1\n else:\n if block:\n block.sort()\n self.write_block(index, block)\n index += 1\n return 0, index", "def imageparts(msg):\n # Don't want a set here because we want to be able to process them in\n # order.\n return filter(lambda part:\n part.get_content_type().startswith('image/'),\n msg.walk())", "def image_generator(self, some_messages):\n offset = 0\n outer = 0\n inner = 0\n\n for a_message in some_messages:\n msg_id = a_message.gmail_id\n for att in a_message.attachments():\n if att.type in ATTACHMENT_MIMES:\n att_type = att.type.split(\"/\")[1]\n an_image = Image(a_message, att)\n\n # map each image id with a corresponding message id for later parsing\n if an_image.id in self.mapping:\n self.mapping[msg_id].append(a_message)\n else:\n self.mapping[msg_id] = [a_message]\n\n self.num_attachments = self.count_attachments(self.num_attachments)\n\n yield an_image", "def blockDetector(self):\n # img = cv2.cvtColor(self.VideoFrame, cv2.COLOR_RGB2HSV)\n # img = cv2.resize(img, (640, 480))\n # cv2.imwrite('hsv.jpg', img)\n # mask = None\n # for color in color_ranges:\n # mask = cv2.inRange(img, color_ranges[color][0], color_ranges[color][1])\n # cv2.imwrite('blockdetect.jpg', cv2.bitwise_and(img, img, mask=mask))\n blocks = self.detectBlocksInDepthImage()\n pick_up_locs = []\n for block in blocks:\n coords = block[0]\n u = (coords[0][0] + coords[2][0]) // 2\n v = (coords[0][1] + coords[2][1]) // 2\n self.VideoFrame = cv2.circle(self.VideoFrame,(u,v), 1, (0,0,0))\n d = self.DepthFrameRaw[u,v]\n d = self.convertDepthToSI(d)\n world_coords = self.calculatePixelToWorld(u, v, d)\n world_coords[2] = self.max_depth - d\n pick_up_locs.append(world_coords)\n return pick_up_locs", "def getblocknumber(self):\n return self.getblockcount()", "def get_block_size(self, img_start):\n pattern_size = 7\n\n left, top = img_start\n block_height, block_width = 0, 0\n for i in range(1, self.image.width - left):\n if self._get_pixel((left + i * pattern_size, top)) == WHITE:\n block_width = i\n break\n for i in range(1, self.image.height - top):\n if self._get_pixel((left, top + i * pattern_size)) == WHITE:\n block_height = i\n break\n return block_width, block_height", "def find_block(int_svip, comp_block):\n print(\"-\" * 20 + \" find_block started\")\n bsz = comp_block\n outsz = 0\n bsdict = {}\n bsdict [0] = bsz\n # Build the dictionary of the host networks\n while outsz < 255:\n outsz = outsz + bsz\n bsdict[outsz] = (outsz + bsz) -1\n #print(outsz)\n \n # Determine the upper and lower bounds of the host network\n for key in bsdict.keys():\n if int_svip >= key and int_svip <= bsdict[key]:\n block_start = key\n block_end = bsdict[key]\n\n #print(\"Block start is {}\\nBlock end is {}\".format(block_start, block_end))\n return block_start, block_end", "def n_blocks(n_frames, block_length):\n return n_frames - block_length + 1", "def get_num_of_images(self):", "def image_id_at(self, i):\n return i", "def ip_get_blocks():\n # start Requests session\n sc = requests.Session()\n\n # import cookies from Firefox\n sc.cookies.update(get_cookies('imhsc.imhadmin.net'))\n\n # send request\n vpx = sc.post('https://imhsc.imhadmin.net/index.php?v=IPManager')\n\n # check if login failed\n check_sc_login(vpx.text)\n\n # parse with BS4\n bs = BeautifulSoup(vpx.text, \"xml\")\n\n # get list of provisioning blocks\n blocklist = []\n for tblk in bs.find_all('table')[3].tr.div.table.find_all('tr'):\n tbx = {\n 'id': re.match(r'.+block_id=([0-9]+).*', 
tblk.find_all('td')[0].a['href']).group(1),\n 'prefix': tblk.find_all('td')[0].a.string,\n 'block': tblk.find_all('td')[1].string,\n 'usage': tblk.find_all('td')[2].string\n }\n blocklist.append(tbx)\n\n return bs, blocklist", "def message_received(self, message):\n\n body_lxml = message.body_lxml()\n for image in body_lxml.iterfind(\".//img[@src]\"):\n if image.attrib['src'] == self.no_devil_banana_url:\n logger.debug(\":nodb: found in {0}'s message {1}\".format(\n message.user_name, message.id\n ))\n with self.last_lock:\n self.last_nodb_message = message.id\n elif image.attrib['src'] in self.devil_banana_urls:\n if message.user_name == self.connector.username:\n # ignore my own devil banana messages\n return\n logger.debug(\"devil banana {2} found in {0}'s message {1}\".format(\n message.user_name, message.id, image.attrib['src']\n ))\n with self.last_lock:\n self.last_banana_message_due_to_edit = False\n self.last_banana_message = message.id", "def process_next_image(self):\n if self.queue:\n next_queue_item = self.queue.popleft()\n if type(next_queue_item) == str:\n if next_queue_item == 'clear':\n self.signal_status_message.emit('Clearing ROI data (from request in image queue)')\n self.clear()\n return\n [image,file_id,image_num] = next_queue_item\n # print('image_num',image_num)\n # print('next image',self.next_image)\n self.signal_status_message.emit('Started processing ID {} Im {}'.format(file_id,image_num))\n image = image - self.emccd_bias # don't edit in place because this seemed to cause an issue with images not showing in GUI. Maybe not thread safe?\n # print('image min',np.min(image))\n # print('image max',np.max(image))\n image_num_too_big = False\n for group in self.roi_groups:\n for roi in group.rois:\n try:\n roi.counts[image_num][file_id] = image[roi.x:roi.x+roi.w,roi.y:roi.y+roi.h].sum()\n except IndexError: # image_num was not valid for the number of images that MAIA is expecting\n image_num_too_big = True\n if image_num_too_big:\n self.signal_status_message.emit('Image number {} is greater than max expected images, so this image has been ignored (most likely cause is rearrangement toggle).')\n self.signal_status_message.emit('Finished processing ID {} Im {}'.format(file_id,image_num))\n self.calculate_thresholds()", "def mine_blocks(self, count):\n\n # Clear out block announcements from each p2p listener\n [x.clear_block_announcements() for x in self.nodes[0].p2ps]\n self.generatetoaddress(self.nodes[0], count, self.nodes[0].get_deterministic_priv_key().address)\n return int(self.nodes[0].getbestblockhash(), 16)", "def draw_bboxes_withindex(img,boxes, uids):\n source = Image.fromarray(img)\n draw = ImageDraw.Draw(source)\n w2,h2 = (img.shape[0],img.shape[1])\n \n font = ImageFont.truetype('/usr/share/fonts/truetype/freefont/FreeSerif.ttf', 40)\n #font = ImageFont.truetype('arial.ttf', 24)\n\n\n idx = 0\n\n for b in boxes:\n xmin,ymin,xmax,ymax = b\n \n for j in range(3):\n draw.rectangle(((xmin+j, ymin+j), (xmax+j, ymax+j)), outline=\"red\")\n draw.text((xmin+20, ymin+70), str(uids[idx]), font = font)\n idx +=1\n return source", "def cover(img: Image, message: bitarray) -> Image:\n width, height = img.size\n check_image_width(width, RGB_PIXEL, message)\n pixels = img.load()\n\n row = random.randint(0, height) # Randomly chooses row.\n i = 0 # Tracks hidden bits\n\n # If Image consist of 8-bit pixels\n if img.mode == \"P\":\n offset = generate_offset(width, P_PIXEL, message)\n for x in range(offset, width):\n p = pixels[x,row]\n if i < len(message):\n p = 
modify_byte(p, message[i])\n i += 1\n pixels[x,row] = p\n generate_key(row, message, offset * P_PIXEL)\n # If Image consists of 3x8-bit pixels\n elif img.mode == \"RGB\":\n offset = generate_offset(width, RGB_PIXEL, message)\n for x in range(offset, width):\n r, g, b = pixels[x,row]\n if i < len(message):\n r = modify_byte(r, message[i])\n i += 1\n if i < len(message):\n g = modify_byte(g, message[i])\n i += 1\n if i < len(message):\n b = modify_byte(b, message[i])\n i += 1\n pixels[x,row] = (r, g, b)\n generate_key(row, message, offset * RGB_PIXEL)\n # If Image consists of 4x8-bits pixels\n elif img.mode == \"RGBA\":\n offset = generate_offset(width, RGBA_PIXEL, message)\n for x in range(offset, width):\n r, g, b, a = pixels[x,row]\n if i < len(message):\n r = modify_byte(r, message[i])\n i += 1\n if i < len(message):\n g = modify_byte(g, message[i])\n i += 1\n if i < len(message):\n b = modify_byte(b, message[i])\n i += 1\n if i < len(message):\n a = modify_byte(a, message[i])\n i += 1\n pixels[x,row] = (r, g, b, a)\n generate_key(row, message, offset * RGBA_PIXEL)\n\n return img", "def get_blockNumber(self, data):\n blockNumber = data['blockNumber']\n return blockNumber", "def block_offsets(self):\n ...", "def getSnapshotsOf(image):\n snapshotIds = []\n deviceMapping = image.block_device_mapping # dict of devices\n devices = deviceMapping.keys()\n for d in devices:\n snapshotId = deviceMapping[d].snapshot_id\n if snapshotId is not None:\n snapshotIds.append(snapshotId.encode())\n return snapshotIds", "def blocks_vision(self):\n return self.blocks_vision", "def blockDetector(self, frame):\n self.detectBlocksInDepthImage()\n self.block_pos = np.zeros((50,3))\n self.block_num = 0\n pixal_arm = np.zeros(2)\n if self.kinectCalibrated == True:\n # get current arm position\n real_arm_x, real_arm_y,_,_ = self.rexarm.get_wrist_pose()\n real_arm_x *= -1000\n real_arm_y *= 1000\n real_arm = np.array(([real_arm_x],[real_arm_y],[1]))\n \n # normalize arm_line vector\n arm_length = np.sqrt(real_arm_x**2 + real_arm_y**2)\n l = np.array([real_arm_x,real_arm_y])\n\n # find center of block_1_height and put them into block_position\n for cnt in self.block_contours:\n M = cv2.moments(cnt)\n if M['m00']>0:\n cx = int(M['m10']/M['m00'])\n cy = int(M['m01']/M['m00'])\n c = np.array([[cx],[cy]])\n # Eliminate Arm itself\n real_x = self.real_coord[cx][cy][0]\n real_y = self.real_coord[cx][cy][1]\n d = np.linalg.norm(np.cross(l, np.array([real_x,real_y])))/np.linalg.norm(l)\n \n d_to_arm = np.sqrt((real_x-real_arm_x)**2+(real_y-real_arm_y)**2)\n d_to_ori = np.sqrt(real_x**2 + real_y**2)\n\n \n if d > 2 and not(d_to_ori<arm_length and d_to_arm<arm_length):\n # Check if its in our ROI\n if self.real_coord[cx][cy][0]>self.real_points[0][0] and self.real_coord[cx][cy][0]<self.real_points[2][0]:\n if self.real_coord[cx][cy][1]<self.real_points[0][1] and self.real_coord[cx][cy][1]>self.real_points[2][1]:\n # points\n self.block_pos[self.block_num][0] = self.real_coord[cx][cy][0]\n self.block_pos[self.block_num][1] = self.real_coord[cx][cy][1]\n self.block_pos[self.block_num][2] = 1\n # orientation\n rect = cv2.minAreaRect(cnt)\n self.block_ori[self.block_num][0] = -rect[2]\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n # detect color\n self.block_color[self.block_num] = self.colorDetector(cx,cy)\n # draw contours\n cv2.drawContours(self.currentDetectFrame,[box],0,(30,145,86),3)\n cv2.circle(self.currentDetectFrame,(cx,cy),5,(30,145,86),-1)\n self.block_num += 1\n\n # find centers of 2 blocks\n for cnt in 
self.block_contours_2:\n M = cv2.moments(cnt)\n if M['m00']>0:\n cx = int(M['m10']/M['m00'])\n cy = int(M['m01']/M['m00'])\n c = np.array([[cx],[cy]])\n # Eliminate Arm itself\n real_x = self.real_coord[cx][cy][0]\n real_y = self.real_coord[cx][cy][1]\n d = np.linalg.norm(np.cross(l, np.array([real_x,real_y])))/np.linalg.norm(l)\n \n d_to_arm = np.sqrt((real_x-real_arm_x)**2+(real_y-real_arm_y)**2)\n d_to_ori = np.sqrt(real_x**2 + real_y**2)\n \n \n if d > 2 and not(d_to_ori<arm_length and d_to_arm<arm_length):\n # Check if its in our ROI\n if self.real_coord[cx][cy][0]>self.real_points[0][0] and self.real_coord[cx][cy][0]<self.real_points[2][0]:\n if self.real_coord[cx][cy][1]<self.real_points[0][1] and self.real_coord[cx][cy][1]>self.real_points[2][1]:\n # points\n self.block_pos[self.block_num][0] = self.real_coord[cx][cy][0]\n self.block_pos[self.block_num][1] = self.real_coord[cx][cy][1]\n self.block_pos[self.block_num][2] = 2\n # orientation\n rect = cv2.minAreaRect(cnt)\n self.block_ori[self.block_num][0] = -rect[2]\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n # detect color\n self.block_color[self.block_num] = self.colorDetector(cx,cy)\n # draw contours\n cv2.drawContours(self.currentDetectFrame,[box],0,(30,87,137),3)\n cv2.circle(self.currentDetectFrame,(cx,cy),5,(30,87,137),-1)\n self.block_num += 1 \n\n # find centers of 3 blocks\n for cnt in self.block_contours_3:\n M = cv2.moments(cnt)\n if M['m00']>0:\n cx = int(M['m10']/M['m00'])\n cy = int(M['m01']/M['m00'])\n c = np.array([[cx],[cy]])\n # Eliminate Arm itself\n real_x = self.real_coord[cx][cy][0]\n real_y = self.real_coord[cx][cy][1]\n d = np.linalg.norm(np.cross(l, np.array([real_x,real_y])))/np.linalg.norm(l)\n \n d_to_arm = np.sqrt((real_x-real_arm_x)**2+(real_y-real_arm_y)**2)\n d_to_ori = np.sqrt(real_x**2 + real_y**2)\n \n \n if d > 2 and not(d_to_ori<arm_length and d_to_arm<arm_length):\n # Check if its in our ROI\n if self.real_coord[cx][cy][0]>self.real_points[0][0] and self.real_coord[cx][cy][0]<self.real_points[2][0]:\n if self.real_coord[cx][cy][1]<self.real_points[0][1] and self.real_coord[cx][cy][1]>self.real_points[2][1]:\n # points\n self.block_pos[self.block_num][0] = self.real_coord[cx][cy][0]\n self.block_pos[self.block_num][1] = self.real_coord[cx][cy][1]\n self.block_pos[self.block_num][2] = 3\n # orientation\n rect = cv2.minAreaRect(cnt)\n self.block_ori[self.block_num][0] = -rect[2]\n box = cv2.boxPoints(rect)\n box = np.int0(box)\n # detect color\n self.block_color[self.block_num] = self.colorDetector(cx,cy)\n # draw contours\n cv2.drawContours(self.currentDetectFrame,[box],0,(204,6,6),3)\n cv2.circle(self.currentDetectFrame,(cx,cy),5,(204,6,6),-1)\n self.block_num += 1 \n self.block_pos[self.block_num:50] = 0\n self.block_ori[self.block_num:50] = 0\n\n return frame", "def process(self, image):", "def retrieve_new_blocks_since(number_of_last_sent_block, web3):\n new_blocks = []\n number_of_last_block = web3.eth.getBlock('latest').number\n if number_of_last_block > number_of_last_sent_block:\n number_of_blocks_to_send = number_of_last_block - number_of_last_sent_block\n for i in range(1, number_of_blocks_to_send + 1):\n new_blocks.append(web3.eth.getBlock(number_of_last_sent_block + i))\n return number_of_last_block, new_blocks\n else:\n return number_of_last_sent_block, new_blocks", "def getNum(self):\r\n return self.blockNum", "def compute_histogram_blocks(image, text_box, n_bins, color_space=\"RGB\", block_size=16):\n\n image = cv.cvtColor(image, OPENCV_COLOR_SPACES[color_space])\n\n # 
image_id = int(ntpath.basename(image_path.replace('.jpg', '')))\n # boxes = pickle.load(open(os.path.join(boxes_path), 'rb'))[image_id][0]\n\n if text_box:\n tlx_init = text_box[0]\n tly_init = text_box[1]\n brx_init = text_box[2]\n bry_init = text_box[3]\n\n sizeX = image.shape[1]\n sizeY = image.shape[0]\n\n hist_concat = None\n \n for i in range(0,block_size):\n for j in range(0, block_size):\n # Image block\n img_cell = image[int(i*sizeY/block_size):int(i*sizeY/block_size) + int(sizeY/block_size) ,int(j*sizeX/block_size):int(j*sizeX/block_size) + int(sizeX/block_size)]\n\n if not text_box:\n hist = compute_histogram(img_cell, n_bins, color_space)\n\n # If there's a text bounding box ignore the pixels inside it\n else:\n tlx = tlx_init-int(j*sizeX/block_size)\n tly = tly_init-int(i*sizeY/block_size)\n brx = brx_init-int(j*sizeX/block_size)\n bry = bry_init-int(i*sizeY/block_size)\n\n img_cell_vector = []\n\n for x in range(img_cell.shape[1]-1):\n for y in range(img_cell.shape[0]-1):\n if not (tlx < x < brx and tly < y < bry):\n img_cell_vector.append(img_cell[y,x,:])\n\n img_cell_vector = np.asarray(img_cell_vector)\n\n n_channels = 1 if color_space == \"GRAY\" else image.shape[2]\n # Using 3D histograms --> total_bins = n_bins_per_channel ^ n_channels\n hist=np.zeros(n_bins**n_channels,dtype=np.float32)\n\n if img_cell_vector.size!=0:\n img_cell_matrix = np.reshape(img_cell_vector,(img_cell_vector.shape[0],1,-1))\n hist = compute_histogram(img_cell_matrix, n_bins, color_space)\n\n if hist_concat is None:\n hist_concat = hist\n else:\n hist_concat = cv.hconcat([hist_concat, hist])\n\n return hist_concat" ]
[ "0.5884302", "0.5805182", "0.5722689", "0.5625986", "0.5602621", "0.55785567", "0.5520678", "0.5473761", "0.5423533", "0.53958166", "0.5387366", "0.53733194", "0.537314", "0.5340807", "0.5339228", "0.5338712", "0.533123", "0.53037155", "0.5302177", "0.52923036", "0.5285601", "0.5277623", "0.5265335", "0.52329016", "0.522606", "0.5209315", "0.5189084", "0.51883864", "0.5183319", "0.5172357" ]
0.6633512
0
Return inputted mouse position.
def get_mouse_pos(self): return self.mouse_pos
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_mouse_position(self):\n raise NotImplementedError", "def mousePos():\n data = display.Display().screen().root.query_pointer()._data\n return data[\"root_x\"], data[\"root_y\"]", "def read_current_mouse_position():\n import pyautogui\n pyautogui.FAILSAFE = False\n return pyautogui.position()", "def mouse_position(self):\r\n # TODO: add: Now deprecated in favor of pi3d.events\r\n if self.mouse:\r\n return self.mouse.position()\r\n elif self.tkwin:\r\n return self.tkwin.winfo_pointerxy()\r\n else:\r\n return -1, -1", "def getMousePosition(self):\n return (self.mouseData.x, self.mouseData.y)", "def mousePosition(self):", "def getMouse():\n return pygame.mouse.get_pos()", "def mousePositionRaw(self):", "def mousePositionRaw(self):", "def mouse_motion_current_mouse_position() -> EventType:\n x, y = pygame.mouse.get_pos()\n return pygame.event.Event(pygame.MOUSEMOTION, {'pos': (int(x), int(y))})", "def get_mouse_pos(new_x_coord, new_y_coord):\n\n x_change = 0\n y_change = 0\n \n # if the joystick returned to its default position (0,0), stop mouse movement\n if not (new_x_coord == 0 and new_y_coord == 0):\n if new_x_coord == 0:\n x_change = 0\n else:\n x_change = new_x_coord\n\n if new_y_coord == 0:\n y_change = 0\n else:\n y_change = -new_y_coord\n \n return (int(x_change), int(y_change))", "def mouse_position_event(self, x: int, y: int):\n pass", "def get_mouse():\n if CONST.render == 'sfml':\n mouse_pos = Vector2(sfml.Mouse.get_position())/engine.screen_diff_ratio+engine.get_origin_pos()\n return mouse_pos,\\\n [sfml.Mouse.is_button_pressed(sfml.Mouse.LEFT),\n sfml.Mouse.is_button_pressed(sfml.Mouse.RIGHT),\n sfml.Mouse.is_button_pressed(sfml.Mouse.MIDDLE)]\n elif CONST.render == 'pookoo':\n return Vector2(pookoo.input.mouse.position()), [\n False,False,False\n ]\n elif CONST.render == 'kivy':\n return Vector2(), [False,False,False]", "def pos(self):\n x = (self.ec._win._mouse_x -\n self.ec._win.width / 2.) / (self.ec._win.width / 2.)\n y = (self.ec._win._mouse_y -\n self.ec._win.height / 2.) 
/ (self.ec._win.height / 2.)\n return np.array([x, y])", "def getMouse(self):\n self.mouseX = None\n self.mouseY = None\n while self.mouseX == None or self.mouseY == None:\n #self.update()\n _tkCall(self.update)\n if self.isClosed(): raise GraphicsError, \"getMouse in closed window\"\n time.sleep(.1) # give up thread\n x,y = self.toWorld(self.mouseX, self.mouseY)\n self.mouseX = None\n self.mouseY = None\n return Point(x,y)", "def get_mouse_coordinate(self):\n pos = pygame.mouse.get_pos()\n mov = pygame.mouse.get_rel()\n row = pos[0] // (self.CELL_WIDTH + self.MARGIN)\n col = (pos[1] - self.PANEL_HEIGHT) // (self.CELL_WIDTH + self.MARGIN)\n if mov != (0, 0) and not self.env.not_in_grid(row, col):\n return (row, col)\n return self.markerPos", "def mouse_coords(desktop=False):\n x, y = c_int(0), c_int(0)\n if desktop:\n mouse.SDL_GetGlobalMouseState(byref(x), byref(y))\n else:\n mouse.SDL_GetMouseState(byref(x), byref(y))\n return (int(x.value), int(y.value))", "def getMouse(self):\n self.update() # flush any prior clicks\n self.mouseX = None\n self.mouseY = None\n while self.mouseX == None or self.mouseY == None:\n self.update()\n if self.isClosed(): raise GraphicsError(\"getMouse in closed window\")\n time.sleep(.1) # give up thread\n x,y = self.mouseX, self.mouseY\n self.mouseX = None\n self.mouseY = None\n return Point(x,y)", "def mouse_delta():\n x, y = c_int(0), c_int(0)\n mouse.SDL_GetRelativeMouseState(byref(x), byref(y))\n return (int(x.value), int(y.value))", "def show_mouse_position_with_px(self):\n self.main_menu_greets_fonts = pygame.font.Font(os.path.join(PATH_TO_RESOURCE, 'font_forever.ttf'), 10)\n self.positiontext(f'Mouse position {pygame.mouse.get_pos()}', (770, 20))\n self.mouse = pygame.mouse.get_pos()\n return self.mouse", "def mouse_position(pos):\n x, y = pos\n m = x // SQUARE_SIZE\n n = y // SQUARE_SIZE\n return n, m", "def get_position():\n return win32api.GetCursorPos()", "def getInputPoint(self):\n return self[0].getPoint()", "def convert_mousepos(self, pos):\n tokenx, tokeny = pos\n row = int((tokenx - self.x_margin) / SPACESIZE)\n column = int((tokeny - self.y_margin) / SPACESIZE)\n return column, row", "def checkMouse(self):\n if self.isClosed():\n raise GraphicsError, \"checkMouse in closed window\"\n _tkCall(self.update)\n if self.mouseX != None and self.mouseY != None:\n x,y = self.toWorld(self.mouseX, self.mouseY)\n self.mouseX = None\n self.mouseY = None\n return Point(x,y)\n else:\n return None", "def mouse_move(camera, mouse, width, height):\n # distance moved from screen center\n # Using the '//' operator (floor division) to produce an integer result\n x = width//2 - mouse.position[0]\n y = height//2 - mouse.position[1]\n \n # intialize mouse so it doesn't jerk first time\n try:\n camera['mouseInit']\n except KeyError:\n x = 0\n y = 0\n # bug in Add Property\n # can't use True. 
Have to use 1\n camera['mouseInit'] = 1\n\n logger.debug(\"Read displacement: %s, %s\" % (x, y))\n \n # return mouse movement\n return (x, y)", "def cursorPosGL(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n y = self.size().height() - pos.y()\n return pos.x(), y", "def cursorPosGL(self):\n globalPos = QtGui.QCursor.pos()\n pos = self.mapFromGlobal(globalPos)\n y = self.size().height() - pos.y()\n return pos.x(), y", "def handle_mouse(self, x, y):\n pass", "def checkMouse(self):\n if self.isClosed():\n raise GraphicsError(\"checkMouse in closed window\")\n self.update()\n if self.mouseX != None and self.mouseY != None:\n x,y = self.mouseX, self.mouseY\n self.mouseX = None\n self.mouseY = None\n return Point(x,y)\n else:\n return None" ]
[ "0.80970645", "0.79738057", "0.7952894", "0.7875623", "0.7716459", "0.7669065", "0.7658517", "0.7630226", "0.7630226", "0.74635327", "0.7459193", "0.73358524", "0.7272576", "0.7244995", "0.71246344", "0.70662653", "0.70661056", "0.7031796", "0.69420946", "0.6930538", "0.6928015", "0.6922833", "0.6825276", "0.66713905", "0.6654901", "0.6612146", "0.659677", "0.659677", "0.6590026", "0.6584995" ]
0.8032351
1
Initialize type and button.
def __init__(self, event_type, button): self.type = event_type self.button = button
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self):\n self.handlers = {}\n self.start_state = None\n self.end_states = []\n self.btn = Button()", "def _init_ui(self):\r\n\t\t\r\n\t\tself.input_frame = Input(self)\r\n\t\tself.input_frame.pack()\r\n\t\t\r\n\t\tbutton_ok = Button(self, text = \"Ping\", command = self._go)\r\n\t\tbutton_ok.pack()\r\n\t\t\r\n\t\tself.result_frame = Result(self)\r\n\t\tself.result_frame.pack()", "def init_widget(self):", "def __init__(self):\r\n\r\n self.id = -1 # button's id\r\n self.cur_state = AUI_BUTTON_STATE_NORMAL # current state (normal, hover, pressed, etc.)\r\n self.location = wx.LEFT # buttons location (wxLEFT, wxRIGHT, or wxCENTER)\r\n self.bitmap = wx.NullBitmap # button's hover bitmap\r\n self.dis_bitmap = wx.NullBitmap # button's disabled bitmap\r\n self.rect = wx.Rect() # button's hit rectangle\r", "def _initializeUi(self):\r\n if self._mode == 'imperial':\r\n self.imperial_button.setChecked(True)\r\n self.imperial_button.clicked.emit()\r\n else:\r\n self.metric_button.setChecked(True)\r\n self.metric_button.clicked.emit()", "def init(self, type, cfg = \"\"):\n\t\tself.setType(type)\n\t\tself.setConfig(cfg)", "def __init__(self, type_):\n\n self.type = type_", "def initializePage(self):\n topLayout = self.layout()\n # remove old buttons from a previously set subtype\n for button in self.subtypeButtons.buttons():\n self.subtypeButtons.removeButton(button)\n topLayout.removeWidget(button)\n button.deleteLater()\n\n for id, subtype in enumerate(ExportDialog.\n exportSubtypes[ExportDialog.currentType]):\n button = QtGui.QRadioButton(ExportDialog.subtypeDescript[subtype])\n self.subtypeButtons.addButton(button, id)\n topLayout.addWidget(button)\n if subtype == ExportDialog.currentSubtype:\n button.setChecked(True)", "def _initialize(self):\n\n cancel_button = ttk.Button(\n master=self._frame,\n text=\"Cancel\",\n command=self._hide_confirmation_window\n )\n\n delete_button = ttk.Button(\n master=self._frame,\n text=\"Delete\",\n command=self._handle_delete\n )\n\n self._initilize_message()\n\n cancel_button.grid(row=1, column=0, padx=5, pady=5)\n delete_button.grid(row=1, column=1, padx=5, pady=5)", "def __init__(self, parent=None):\n super().__init__(parent)\n\n topLayout = QtGui.QVBoxLayout(self)\n self.setLayout(topLayout)\n self.setTitle(_('Choose export format type'))\n\n typeButtons = QtGui.QButtonGroup(self)\n for id, exportType in enumerate(ExportDialog.exportTypes):\n button = QtGui.QRadioButton(ExportDialog.\n exportTypeDescript[exportType])\n typeButtons.addButton(button, id)\n topLayout.addWidget(button)\n if exportType == ExportDialog.currentType:\n button.setChecked(True)\n typeButtons.buttonClicked[int].connect(self.setCurrentType)", "def init_ui(self):\n raise NotImplementedError", "def init_ui(self):\n raise NotImplementedError", "def __init__(self, button_id):\r\n\r\n self.button_id = button_id", "def initialize(self):\n self._ui.img_name.setText('No files selected')\n self._ui.bt_next.setEnabled(False)\n self._ui.bt_prev.setEnabled(False)\n self._ui.bt_right.setEnabled(False)\n self._ui.bt_left.setEnabled(False)\n self._ui.gps_button.setEnabled(False)\n\n self._open_btn = QPushButton('Open File', self._ui.img_label)\n self.adjustSize()", "def initUI(self):\n startbtn = QPushButton(\"Start Recroding\", self)\n startbtn.move(30, 50)\n\n stopbtn = QPushButton(\"Stop Recording\", self)\n stopbtn.move(150, 50)\n\n initbtn = QPushButton(\"Initilize\", self)\n initbtn.move(30, 100)\n\n plotbtn = QPushButton(\"Plot\", self)\n plotbtn.move(150, 100)\n\n 
startbtn.clicked.connect(self.start_recording)\n stopbtn.clicked.connect(self.stop_recording)\n initbtn.clicked.connect(self.init_recording)\n plotbtn.clicked.connect(self.plot_signals)\n\n self.statusBar()\n self.statusBar().showMessage('Click Init')\n\n self.setGeometry(300, 300, 290, 150)\n self.setWindowTitle('Recorder 1.0')\n self.setWindowIcon(QIcon(\"./Static/Images/icon.jpg\"))\n self.show()", "def setup_options(self):\n self.analysis_type_label = Label(self, text='Select what you wish to do:')\n self.analysis_type_label.pack(fill=BOTH, expand=True)\n\n # Create Select option\n self._analysis_type_variable = StringVar(self)\n options1 = [\n 'Regression',\n 'Classification'\n ]\n self._analysis_type_variable.set(options1[0])\n self.type_option_menu = OptionMenu(self, self._analysis_type_variable, *options1)\n self.type_option_menu.pack()\n\n self.analyse_button = Button(self, text='NEXT', background='White', command=self.perform_analysis)\n self.analyse_button.pack(padx=5, pady=10)", "def _initUI(self) -> None:\n self._createActions()\n self._addActionsToMoveButtons()\n self._createToolBar()\n self._createStatusBar()\n self._createMainContextMenu()", "def controls_setup(self):\n pass", "def initialize(self):\n self.actions = []\n \"*** YOUR CODE HERE\"\n #raise NotImplementedError()", "def configure_widgets(self):\r\n\r\n # 'command' - callback function executed when button is pressed\r\n # since we can't pass it a function with arguments, we use the partial \r\n # function from the functools module\r\n self.btn_tl['command'] = partial(self.play, \"x\", (0,0))\r\n self.btn_tm['command'] = partial(self.play, \"x\", (0,1))\r\n self.btn_tr['command'] = partial(self.play, \"x\", (0,2))\r\n self.btn_ml['command'] = partial(self.play, \"x\", (1,0))\r\n self.btn_mm['command'] = partial(self.play, \"x\", (1,1))\r\n self.btn_mr['command'] = partial(self.play, \"x\", (1,2))\r\n self.btn_bl['command'] = partial(self.play, \"x\", (2,0))\r\n self.btn_bm['command'] = partial(self.play, \"x\", (2,1))\r\n self.btn_br['command'] = partial(self.play, \"x\", (2,2))\r\n\r\n self.btn_reset['text'] = \"Reset\"\r\n self.btn_reset['command'] = self.reset", "def __init__(self):\n\n GPIO.setup(PIN_BTN, GPIO.IN, GPIO.PUD_UP)\n GPIO.setup(PIN_RED_LED_0, GPIO.OUT, GPIO.LOW)\n GPIO.setup(PIN_BLUE_LED, GPIO.OUT, GPIO.LOW)", "def initialize_buttons(self):\r\n self.start_button = tk.Button(self.master, text='Start', command = self.startRunning)\r\n self.start_button.grid(row=0, column=0)\r\n\r\n self.pause_button = tk.Button(self.master, text='Pause', command = self.pauseRunning)\r\n self.pause_button.grid(row=0, column=1)\r\n\r\n self.graph_button = tk.Button(self.master, text='Graph', command = self.showGraph)\r\n self.graph_button.grid(row=0, column=2)\r\n \r\n self.plot_button = tk.Button(self.master, text='Plot', command = self.showPlot)\r\n self.plot_button.grid(row=0, column=3)\r\n \r\n self.draw_button = tk.Button(self.master, text='Draw', command = self.drawCells)\r\n self.draw_button.grid(row=0, column=4)\r\n \r\n # Initialize Button States and Actions\r\n self.pause_button['state'] = 'disabled'\r\n # Boolean switch to control flow of placement process\r\n self.running = False\r\n # Boolean switch to plot placement connections and tags, turn off for faster processing\r\n self.plot = False\r\n self.drawing = False\r\n self.graph = False\r\n # Boolean switch to specify first run and allow stop/continue behavior that doesn't initialize program\r\n self.firstRun = True", "def __init__(self, the_type, enabled, 
generate_code):\n self.__type = the_type\n self.setEnabled(enabled)\n self.setGenerateCode(generate_code)", "def gui_init(self):\n GUI_element.gui_init(self)\n\n self.hover_sound = False\n \n if not self.image is None:\n self.generic_button = False\n self.width = self.image.width if self.width == 0 else self.width\n self.height = self.image.height if self.height == 0 else self.height\n else:\n # Set up a generic button\n self.generic_button = True\n self.image = self.game.core.media.gfx['gui_button_generic_background']\n self.draw_strategy = \"gui_button\"\n\n # fixed height\n self.height = 30\n \n # Create the text\n self.generic_button_text_object = Text(self.game.core.media.fonts[\"generic_buttons\"], self.x, self.y + (self.height / 2), TEXT_ALIGN_CENTER, self.generic_button_text)\n self.generic_button_text_object.z = self.z - 1\n self.generic_button_text_object.colour = (1.0,1.0,1.0)\n \n # Set up the width, if we have a larger than normal width then we want to centre the text.\n if self.width < self.generic_button_text_object.text_width + 20:\n self.width = self.generic_button_text_object.text_width + 20\n self.generic_button_text_object.x += (self.width / 2)\n \n self.sequence_count = self.image.num_of_frames\n self.draw_strategy_call_parent = False", "def init_ui(self):\n self.master.title(\"Backbone\")\n self.master.geometry(\"300x150\")\n\n self.pack(fill=BOTH, expand=1)\n\n self.btn_upload_file = Button(self, text=\"Upload file\", command=self.upload_file)\n self.btn_upload_file.place(x=90, y=10)\n\n self.btn_create_training_file = Button(self, text=\"Create & upload training file\",\n command=self.create_training_file)\n self.btn_create_training_file.place(x=30, y=40)\n\n self.btn_run_algorithm = Button(self, text=\"Run algorithm\", command=self.run_algorithm)\n self.btn_run_algorithm.place(x=80, y=70)\n\n self.btn_view_results = Button(self, text=\"View Results\", command=self.view_results)\n self.btn_view_results.place(x=85, y=100)", "def __init__(self, parent=None):\n super().__init__(parent)\n\n topLayout = QtGui.QVBoxLayout(self)\n self.setLayout(topLayout)\n self.setTitle(_('Choose export format subtype'))\n self.subtypeButtons = QtGui.QButtonGroup(self)\n self.subtypeButtons.buttonClicked[int].connect(self.setCurrentSubtype)", "def initialize(self,init_info):\n self.action_info = init_info.actions\n return True", "def __init__buttons(language, color_writing, color_end_b_writing, field_size, button_bg_color):\r\n _create_buttons(field_size, button_bg_color) # creates buttons\r\n __init__button_writings(language, color_writing, color_end_b_writing, field_size) # creates writings\r", "def __init__(self):\n self.window = Tk() # The main window\n self.__initialize_variables__() # Initialize the variables\n self.__initialize_menu__() # Initialize the Menu\n self.__initialize_status_bar__()\n self.__initialize_gui__() # Initialize the GUI widgets", "def create_widgets(self):\n #create first button\n self.button1 = Button(self, text = \"Start\")\n self.button1.bind\n self.button1.grid()" ]
[ "0.6788451", "0.6683954", "0.65892327", "0.6497996", "0.64862543", "0.6442674", "0.6437974", "0.64198774", "0.64093477", "0.63769287", "0.63739365", "0.63739365", "0.6325634", "0.6292619", "0.6286793", "0.62425756", "0.62415385", "0.6241002", "0.6228872", "0.619486", "0.61938703", "0.6183539", "0.6172546", "0.61667246", "0.6157678", "0.61474746", "0.613011", "0.61263514", "0.61243594", "0.61141825" ]
0.7603971
0
get final coach for each season; the coach with more than half the season will be the credited coach for eventual playoff and championship won
def get_final_coach_for_each_season(self): self.final_coach_for_season = ( self.num_days_coach_for_season .groupby(['Season','TeamID']) .agg({"CoachName":"count"}) .reset_index() .rename(columns={"CoachName":"coach_counts"}) .merge(self.num_days_coach_for_season,how='left',on=['Season','TeamID']) .pipe(lambda x:x.assign(final_coach = np.where(x.num_season >= 0.5, x.CoachName, "ignore"))) [['Season','TeamID','final_coach']] )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_championship_won_for_each_coach(self):\n self.championship_team = (\n self.raw_data_postseason\n .merge(self.season_max_days,how='left',on=['Season'])\n .query(\"DayNum == season_max_days\")\n .groupby(['Season','WTeamID'])\n .agg({\"NumOT\":\"count\"})\n .reset_index()\n .rename(columns={\"NumOT\":\"is_champion\",\"WTeamID\":\"TeamID\"})\n )", "def get_win_rate_regular_season_for_each_coach(self):\n self.games_won_for_coaches = (\n self.raw_data_regularseason\n [['Season','DayNum','WTeamID']]\n # merge for winning team\n .merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],\n how='left',left_on=['Season','WTeamID'],right_on=['Season','TeamID'])\n .rename(columns={\"FirstDayNum\":\"FirstDayNum_win\",\"LastDayNum\":\"LastDayNum_win\",\"CoachName\":\"CoachName_win\",\"TeamID\":\"TeamID_win\"})\n .pipe(lambda x:x.assign(which_coach_for_win = np.where((x.FirstDayNum_win <= x.DayNum) & (x.LastDayNum_win >= x.DayNum),1,0)))\n .query(\"which_coach_for_win != 0\")\n .groupby(['Season','CoachName_win','WTeamID'])\n .agg({\"which_coach_for_win\":\"sum\"})\n .reset_index()\n )\n\n self.games_lose_for_coaches = (\n self.raw_data_regularseason\n [['Season','DayNum','LTeamID']]\n # merge for losing team\n .merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],\n how='left',left_on=['Season','LTeamID'],right_on=['Season','TeamID'])\n .rename(columns={\"FirstDayNum\":\"FirstDayNum_lose\",\"LastDayNum\":\"LastDayNum_lose\",\"CoachName\":\"CoachName_lose\",\"TeamID\":\"TeamID_lose\"})\n .pipe(lambda x:x.assign(which_coach_for_lose = np.where((x.FirstDayNum_lose <= x.DayNum) & (x.LastDayNum_lose >= x.DayNum),1,0)))\n .query(\"which_coach_for_lose != 0\")\n .groupby(['Season','CoachName_lose','LTeamID'])\n .agg({\"which_coach_for_lose\":\"sum\"})\n .reset_index()\n )\n\n # combine games won and lost df\n self.combine_regular_games_won_lose = (\n self.games_lose_for_coaches\n .merge(self.games_won_for_coaches,how='left',left_on=['Season','LTeamID','CoachName_lose'],right_on=['Season','WTeamID','CoachName_win'])\n .pipe(lambda x:x.assign(win_rate_regular = x.which_coach_for_win/(x.which_coach_for_win + x.which_coach_for_lose)))\n .drop(['CoachName_win','WTeamID'],1)\n .rename(columns={\"CoachName_lose\":\"CoachName\",\"LTeamID\":\"TeamID\",\"which_coach_for_lose\":\"games_lost\",\"which_coach_for_win\":\"games_won\"})\n )", "def get_win_rate_post_season_for_each_coach(self):\n # get winning games for coaches\n self.post_games_won_for_coaches = (\n self.raw_data_postseason\n [['Season','DayNum','WTeamID']]\n # merge for winning team\n .merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],\n how='left',left_on=['Season','WTeamID'],right_on=['Season','TeamID'])\n .rename(columns={\"FirstDayNum\":\"FirstDayNum_win\",\"LastDayNum\":\"LastDayNum_win\",\"CoachName\":\"CoachName_win\",\"TeamID\":\"TeamID_win\"})\n .pipe(lambda x:x.assign(which_coach_for_win = np.where((x.FirstDayNum_win <= x.DayNum) & (x.LastDayNum_win >= x.DayNum),1,0)))\n .query(\"which_coach_for_win != 0\")\n .groupby(['Season','CoachName_win','WTeamID'])\n .agg({\"which_coach_for_win\":\"sum\"})\n .reset_index()\n )\n\n # get losing games for coaches\n self.post_games_lose_for_coaches = (\n self.raw_data_postseason\n [['Season','DayNum','LTeamID']]\n # merge for losing team\n .merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],\n 
how='left',left_on=['Season','LTeamID'],right_on=['Season','TeamID'])\n .rename(columns={\"FirstDayNum\":\"FirstDayNum_lose\",\"LastDayNum\":\"LastDayNum_lose\",\"CoachName\":\"CoachName_lose\",\"TeamID\":\"TeamID_lose\"})\n .pipe(lambda x:x.assign(which_coach_for_lose = np.where((x.FirstDayNum_lose <= x.DayNum) & (x.LastDayNum_lose >= x.DayNum),1,0)))\n .query(\"which_coach_for_lose != 0\")\n .groupby(['Season','CoachName_lose','LTeamID'])\n .agg({\"which_coach_for_lose\":\"sum\"})\n .reset_index()\n )\n\n # combine games won and lost df for post season\n self.combine_post_games_won_lose = (\n self.post_games_lose_for_coaches\n .merge(self.post_games_won_for_coaches,how='left',left_on=['Season','LTeamID','CoachName_lose'],right_on=['Season','WTeamID','CoachName_win'])\n .pipe(lambda x:x.assign(win_rate_post = x.which_coach_for_win/(x.which_coach_for_win + x.which_coach_for_lose)))\n .drop(['CoachName_win','WTeamID'],1)\n .rename(columns={\"CoachName_lose\":\"CoachName\",\"LTeamID\":\"TeamID\",\"which_coach_for_lose\":\"post_games_lost\",\"which_coach_for_win\":\"post_games_won\"})\n .fillna(0)\n )", "def combine_playoff_championship_for_each_coach(self):\n self.final_coach_with_postseason_champion_each_year = (\n self.final_coach_with_postseason_each_year\n .merge(self.championship_team,how='left',on=['Season','TeamID'])\n .fillna(0)\n )", "def get_tourney_rounds(self, conference, year):\n ts_dict = self.get_tourney_slots()\n seed_dict = self.get_tourney_seeds()\n tr_dict = self.get_tourney_results()\n \n round_1 = list()\n round_2 = list()\n round_3 = list()\n round_4 = list()\n winner = list()\n \n round1_winners = list()\n for seed, team in seed_dict[year].items():\n for winning, losing in tr_dict[year]:\n if team == winning and conference in seed:\n round1_winners.append(seed[1:])\n #removes duplicates because I did this part weirdly... 
HEHEH\n round1_winners = list(set(round1_winners))\n\n win_counter = defaultdict(int)\n for seed, team in seed_dict[year].items(): \n for winning, losing in tr_dict[year]:\n if team == winning and conference in seed:\n win_counter[winning] += 1\n \n for slot, matchup in ts_dict[year].items():\n \n if conference in slot and \"R1\" in slot: \n round_1.append(\"{}-{}\".format(matchup[1:3], matchup[-2:]))\n round_1 = sorted(round_1)\n #for match in round_1:\n for winner1 in round1_winners:\n if winner1 in round_1[0]:\n for winner2 in round1_winners:\n if winner2 in round_1[-1]:\n round_2.append(\"{}-{}\".format(winner1, winner2))\n if winner1 in round_1[1]:\n for winner2 in round1_winners:\n if winner2 in round_1[-2]:\n round_2.append(\"{}-{}\".format(winner1, winner2))\n if winner1 in round_1[2]:\n for winner2 in round1_winners:\n if winner2 in round_1[-3]:\n round_2.append(\"{}-{}\".format(winner1, winner2))\n if winner1 in round_1[3]:\n for winner2 in round1_winners:\n if winner2 in round_1[-4]:\n round_2.append(\"{}-{}\".format(winner1, winner2))\n round_2 = sorted(round_2)\n\n round2_winners = list()\n for seed, team in seed_dict[year].items():\n for team2, count in win_counter.items():\n if team == team2 and count > 1:\n round2_winners.append(seed[1:])\n \n for winner1 in round2_winners:\n if winner1 in round_2[0]:\n for winner2 in round2_winners:\n if winner2 in round_2[-1]:\n round_3.append(\"{}-{}\".format(winner1, winner2))\n if winner1 in round_2[1]:\n for winner2 in round2_winners:\n if winner2 in round_2[-2]:\n round_3.append(\"{}-{}\".format(winner1, winner2))\n round_3 = sorted(round_3)\n\n round3_winners = list()\n for seed, team in seed_dict[year].items():\n for team2, count in win_counter.items():\n if team == team2 and count > 2:\n round3_winners.append(seed[1:])\n\n for winner1 in round3_winners:\n if winner1 in round_3[0]:\n for winner2 in round3_winners:\n if winner2 in round_3[-1]:\n round_4.append(\"{}-{}\".format(winner1, winner2))\n round_4 = sorted(round_4)\n\n for seed, team in seed_dict[year].items():\n for team2, count in win_counter.items():\n if team == team2 and count > 3:\n winner.append(seed[1:])\n\n conferences = {\"W\": \"East\", \"X\": \"Midwest\", \"Y\": \"South\", \"Z\": \"West\"}\n\n #print(\"CONFERENCE: {}, YEAR: {}\".format(conferences[conference], year))\n #print(\"ROUND1:\", round_1)\n #print(\"ROUND2:\", round_2)\n #print(\"ROUND3:\", round_3)\n #print(\"ROUND4:\", round_4)\n #print(\"WINNER:\", winner)\n\n #clearing out the tourney results dictionary\n #tr_dict.clear()\n\n return round_1, round_2, round_3, round_4, winner", "def find_opponent(standings,odds):\n\n # simulate all games\n for i in range(len(odds)):\n play_game(odds.loc[i],standings)\n\n # update the points and GD tally\n standings['P']=standings['W']*3 + standings['D']\n standings['GD']=standings['F']-standings['A']\n\n # see if teams have equal amount of points, and award h2h_points for\n # h2h results against those teams.\n for group in \"ABCDEF\":\n gelijk = standings.loc[standings['Group']==group][standings.loc[standings['Group']==group].duplicated(subset='P',keep=False)]\n gelijk[\"h2h_points\"]=np.zeros(len(gelijk))\n\n for i in gelijk.index:\n for team1 in gelijk.loc[i][\"h2h\"]:\n for team2 in gelijk[\"TEAMS\"]:\n if team1==team2:\n standings.loc[i,\"h2h_points\"]+=1\n\n # sort the final standings\n standings = standings.sort_values(by=['Group','P',\"h2h_points\",'GD','F','W'],ascending=[True,False,False,False,False,False])\n\n # determine third placed teams\n standings = 
standings.reset_index()\n third = standings.loc[[2,6,10,14,18,22]]\n\n # determine best number threes\n third = third.sort_values(by=['P','GD','F','W'],ascending=False)\n\n groups_of_best_no_3 = \"\"\n for i in third.head(4).Group:\n groups_of_best_no_3+=i\n groups_of_best_no_3 = ''.join(sorted(groups_of_best_no_3))\n\n # look up the opponent of the dutch team\n a = third.loc[third.Group == opponent_matrix[groups_of_best_no_3]]['TEAMS']\n\n return a.reset_index().TEAMS[0]", "def play_game(game,standings_):\n rand_nmr = random.random()\n\n standings_.loc[standings_.TEAMS==game['Home'],'MP'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'MP'] += 1\n\n if rand_nmr < game['Prob Home']:\n n_goals = goals() # a random number of goals is added to the goal tally, all games and in 1-0,2-0,3-0 or 4-0. This can be improved\n standings_.loc[standings_.TEAMS==game['Home'],'W'] += 1\n standings_.loc[standings_.TEAMS==game['Home'],'F'] += n_goals\n standings_.loc[standings_.TEAMS==game['Away'],'L'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'A'] += n_goals\n standings_.loc[standings_.TEAMS==game['Home']][\"h2h\"].apply(lambda x:x.append(game['Away']))\n\n return 0\n\n elif rand_nmr < game['Prob Home'] + game['Prob Draw']:\n # all draws end in 0-0 this can be improved\n standings_.loc[standings_.TEAMS==game['Home'],'D'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'D'] += 1\n\n return 1\n\n else:\n n_goals = goals() # a random number of goals is added to the goal tally, all games and in 1-0,2-0,3-0 or 4-0. This can be improved\n standings_.loc[standings_.TEAMS==game['Away'],'W'] += 1\n standings_.loc[standings_.TEAMS==game['Away'],'F'] += n_goals\n standings_.loc[standings_.TEAMS==game['Home'],'A'] += 1\n standings_.loc[standings_.TEAMS==game['Home'],'L'] += n_goals\n standings_.loc[standings_.TEAMS==game['Away']][\"h2h\"].apply(lambda x:x.append(game['Home']))\n\n return 2", "def first_round_history(self):\n self.ts_dict = self.get_tourney_slots()\n self.tsr_dict = self.match_seeds()\n first_seed_win = 0\n second_seed_win = 0\n third_seed_win = 0\n fourth_seed_win = 0\n fifth_seed_win = 0\n sixth_seed_win = 0\n seventh_seed_win = 0\n eighth_seed_win = 0\n total_games = 128\n\n for year1 in self.ts_dict: \n for slot, match_up in self.ts_dict[year1].items():\n if slot[:2] == \"R1\":\n for year2 in self.tsr_dict:\n if year1 == year2:\n for winning, losing in self.tsr_dict[year2].items():\n if winning[5:] == match_up[:3]:\n seed = winning[6:] \n if seed == \"01\":\n first_seed_win += 1\n elif seed == \"02\":\n second_seed_win += 1\n elif seed == \"03\":\n third_seed_win += 1\n elif seed == \"04\":\n fourth_seed_win += 1\n elif seed == \"05\":\n fifth_seed_win += 1\n elif seed == \"06\":\n sixth_seed_win += 1\n elif seed == \"07\":\n seventh_seed_win += 1\n elif seed == \"08\":\n eighth_seed_win += 1 \n \n #print(first_seed_win, second_seed_win, third_seed_win, fourth_seed_win, fifth_seed_win, sixth_seed_win, seventh_seed_win, eighth_seed_win, total_games)\n\n gauge = pygal.SolidGauge(inner_radius=0.70, title=\"NCAA First Round Results\")\n ratio_first_seed = int(first_seed_win / total_games * 100)\n ratio_second_seed = int(second_seed_win / total_games * 100)\n ratio_third_seed = int(third_seed_win / total_games * 100)\n ratio_fourth_seed = int(fourth_seed_win / total_games * 100)\n ratio_fifth_seed = int(fifth_seed_win / total_games * 100)\n ratio_sixth_seed = int(sixth_seed_win / total_games * 100)\n ratio_seventh_seed = int(seventh_seed_win / total_games * 100)\n ratio_eighth_seed 
= int(eighth_seed_win / total_games * 100) \n\n percent_formatter = lambda x: '{:.10g}%'.format(x)\n gauge.value_formatter = percent_formatter\n gauge.add('1 vs. 16', [{'value': ratio_first_seed, 'max_value': 100}])\n gauge.add('2 vs. 15', [{'value': ratio_second_seed, 'max_value': 100}])\n gauge.add('3 vs. 14', [{'value': ratio_third_seed, 'max_value': 100}])\n gauge.add('4 vs. 13', [{'value': ratio_fourth_seed, 'max_value': 100}])\n gauge.add('5 vs. 12', [{'value': ratio_fifth_seed, 'max_value': 100}])\n gauge.add('6 vs. 11', [{'value': ratio_sixth_seed, 'max_value': 100}])\n gauge.add('7 vs. 10', [{'value': ratio_seventh_seed, 'max_value': 100}])\n gauge.add('8 vs. 9', [{'value': ratio_eighth_seed, 'max_value': 100}])\n \n gauge.render_to_file('chart.svg')", "def non_current_championships():\n current_championships = (Alfa_Romeo+Ferrari+Haas+McLaren+Mercedes+Racing_Point+Red_Bull+Renault+Toro_Rosso+Williams).constructors_championships_years\n non_current_championships = []\n year = 1958\n while year < 2020:\n if year not in current_championships:\n non_current_championships.append(year)\n year += 1\n return f\"The F1 Constructors' Championships won by teams no longer on the grid are: \\n{non_current_championships}\"", "def winning_games_stats(self):\n self.winning_games_up_to_2013 = (\n self.df\n .pipe(lambda x:x.assign(winning_num_counts = 1))\n .query(\"Season <= 2013\")\n .groupby(['Season','WTeamID'])\n .agg({\"WScore\":\"sum\",\"WFGM\":\"sum\",\"WFGA\":\"sum\",\"WFGM3\":\"sum\",\"WFGA3\":\"sum\",\"WFTM\":\"sum\",\"WFTA\":\"sum\",\"LScore\":\"sum\",\"winning_num_counts\":\"sum\",\n \"WOR\":\"sum\",\"WDR\":\"sum\",\"LFGM\":\"sum\",\"LFGA\":\"sum\",\n \"WAst\":\"sum\",\"WTO\":\"sum\",\"WStl\":\"sum\",\"WBlk\":\"sum\",\"WPF\":\"sum\"})\n .reset_index()\n .rename(columns={\"LScore\":\"losing_opponent_score\"})\n # rebounds\n .pipe(lambda x:x.assign(total_winning_rebounds = x.WOR + x.WDR))\n .pipe(lambda x:x.assign(winning_off_rebounds_percent = x.WOR/x.total_winning_rebounds))\n .pipe(lambda x:x.assign(winning_def_rebounds_percent = x.WDR/x.total_winning_rebounds))\n .pipe(lambda x:x.assign(team_missed_attempts = x.WFGA - x.WFGM))\n .pipe(lambda x:x.assign(opp_team_missed_attempts = x.LFGA - x.LFGM))\n .pipe(lambda x:x.assign(winning_rebound_possession_percent = x.WOR/x.team_missed_attempts))\n .pipe(lambda x:x.assign(winning_rebound_possessiongain_percent = x.WDR/x.opp_team_missed_attempts))\n # blocks, steals, assists and turnovers\n .pipe(lambda x:x.assign(winning_block_opp_FGA_percent = x.WBlk/x.LFGA))\n .pipe(lambda x:x.assign(winning_assist_per_fgm = x.WAst/x.WFGM))\n .pipe(lambda x:x.assign(winning_assist_turnover_ratio = x.WAst/x.WTO))\n # rename columns to prevent duplication when joining with losing stats. 
example: WFGM_x\n .rename(columns={\"LFGA\":\"LFGA_opp\",\"LFGM\":\"LFGM_opp\"})\n )", "def season_series(game_id, pref_team, other_team, last_season=False):\n\n # Init empty dictionaries and lists\n games_against = list()\n pref_toi = dict()\n pref_goals = dict()\n pref_assists = dict()\n pref_points = dict()\n pref_record = {\"wins\": 0, \"losses\": 0, \"ot\": 0}\n roster_player = True\n\n # If this is the first game of the season, we can set the 'last_season' flag to enable the\n # season series function to check last year's season series between the two teams.\n if not last_season:\n season_start = str(game_id)[0:4]\n season_end = str(int(season_start) + 1)\n yesterday = datetime.now() - timedelta(days=1)\n # yesterday = datetime.now() + timedelta(days=50)\n # schedule_url = (\n # f\"/schedule?teamId={pref_team.team_id}\"\n # f\"&expand=schedule.broadcasts,schedule.teams&startDate=\"\n # f\"{season_start}-08-01&endDate={yesterday:%Y-%m-%d}\"\n # )\n schedule_url = (\n f\"/schedule?teamId={pref_team.team_id}\"\n f\"&expand=schedule.broadcasts,schedule.teams\"\n f\"&season={season_start}{season_end}\"\n )\n else:\n season_start = int(str(game_id)[0:4]) - 1\n season_end = str(int(season_start) + 1)\n yesterday = datetime.now() - timedelta(days=1)\n # yesterday = datetime.now() + timedelta(days=50)\n # schedule_url = (\n # f\"/schedule?teamId={pref_team.team_id}\"\n # f\"&expand=schedule.broadcasts,schedule.teams&startDate=\"\n # f\"{season_start}-08-01&endDate={season_end}-06-01\"\n # )\n schedule_url = (\n f\"/schedule?teamId={pref_team.team_id}\"\n f\"&expand=schedule.broadcasts,schedule.teams\"\n f\"&season={season_start}{season_end}\"\n )\n\n schedule = api.nhl_api(schedule_url).json()\n dates = schedule[\"dates\"]\n\n # Loop through scheduled to get previously played games against\n for date in dates:\n game = date[\"games\"][0]\n game_type = game[\"gameType\"]\n game_id = game[\"gamePk\"]\n game_team_home = game[\"teams\"][\"home\"][\"team\"][\"name\"]\n game_team_away = game[\"teams\"][\"away\"][\"team\"][\"name\"]\n teams = [game_team_away, game_team_home]\n game_status = game[\"status\"][\"abstractGameState\"]\n if game_type == \"R\" and game_status == \"Final\" and other_team.team_name in teams:\n game_feed = f\"/game/{game_id}/feed/live\"\n games_against.append(game_feed)\n\n # If the two teams haven't played yet, just exit this function\n if not games_against:\n return None, None, None\n\n # Loop through newly created games_against list to get each stats\n for feed in games_against:\n game = api.nhl_api(feed).json()\n game_data = game[\"gameData\"]\n home_team_name = game_data[\"teams\"][\"home\"][\"name\"]\n pref_homeaway = \"home\" if home_team_name == pref_team.team_name else \"away\"\n other_homeaway = \"away\" if home_team_name == pref_team.team_name else \"home\"\n\n # Get season series\n end_period = game[\"liveData\"][\"linescore\"][\"currentPeriod\"]\n extra_time = True if end_period > 3 else False\n pref_score = game[\"liveData\"][\"linescore\"][\"teams\"][pref_homeaway][\"goals\"]\n other_score = game[\"liveData\"][\"linescore\"][\"teams\"][other_homeaway][\"goals\"]\n if pref_score > other_score:\n pref_record[\"wins\"] += 1\n elif other_score > pref_score and extra_time:\n pref_record[\"ot\"] += 1\n else:\n pref_record[\"losses\"] += 1\n\n season_series_str = f\"Series: {pref_record['wins']}-\" f\"{pref_record['losses']}-{pref_record['ot']}\"\n\n # Get stats leaders\n # pref_teamstats = game[\"liveData\"][\"boxscore\"][\"teams\"][pref_homeaway][\"teamStats\"]\n 
pref_playerstats = game[\"liveData\"][\"boxscore\"][\"teams\"][pref_homeaway][\"players\"]\n for id, player in pref_playerstats.items():\n try:\n # Calculate TOI\n player_toi_str = player[\"stats\"][\"skaterStats\"][\"timeOnIce\"]\n player_toi_minutes = int(player_toi_str.split(\":\")[0])\n player_toi_seconds = int(player_toi_str.split(\":\")[1])\n player_toi = (player_toi_minutes * 60) + player_toi_seconds\n pref_toi[id] = pref_toi.get(id, 0) + player_toi\n\n # Point Totals\n player_goal_str = player[\"stats\"][\"skaterStats\"][\"goals\"]\n pref_goals[id] = pref_goals.get(id, 0) + int(player_goal_str)\n player_assist_str = player[\"stats\"][\"skaterStats\"][\"assists\"]\n pref_assists[id] = pref_assists.get(id, 0) + int(player_assist_str)\n player_points = int(player_goal_str) + int(player_assist_str)\n pref_points[id] = pref_points.get(id, 0) + int(player_points)\n\n except KeyError:\n pass\n\n # Calculate Stats Leaders\n sorted_toi = sorted(pref_toi.values(), reverse=True)\n leader_toi = sorted_toi[0]\n\n sorted_points = sorted(pref_points.values(), reverse=True)\n leader_points = sorted_points[0]\n\n # Get TOI leader\n for id in pref_toi.keys():\n if pref_toi[id] == leader_toi:\n player_name = roster.player_attr_by_id(pref_team.roster, id, \"fullName\")\n if player_name is None:\n roster_player = False\n player_id_only = id.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n leader_toi_avg = leader_toi / len(games_against)\n m, s = divmod(leader_toi_avg, 60)\n toi_m = int(m)\n toi_s = int(s)\n toi_s = \"0{}\".format(toi_s) if toi_s < 10 else toi_s\n toi_avg = \"{}:{}\".format(toi_m, toi_s)\n player_short_name = f\"{player_name[0]}. {' '.join(player_name.split()[1:])}\"\n toi_leader_str = \"TOI Leader: {} with {} / game.\".format(player_short_name, toi_avg)\n\n # Handle tied points leaders\n point_leaders = list()\n for id in pref_points.keys():\n if pref_points[id] == leader_points:\n point_leaders.append(id)\n\n if leader_points == 0:\n points_leader_str = \"Points Leader: None (all players have 0 points).\"\n\n elif len(point_leaders) == 1:\n leader = point_leaders[0]\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n # If the player is no longer on the team, get their information (change string here?)\n if player_name is None:\n roster_player = False\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n if not roster_player:\n points_leader_str = (\n f\"Points Leader: {player_name} with {leader_points} points \"\n f\"({player_goals}G {player_assists}A) \"\n )\n else:\n points_leader_str = \"Points Leader: {} with {} ({}G {}A).\".format(\n player_name, leader_points, player_goals, player_assists\n )\n\n elif len(point_leaders) > 3:\n point_leaders_with_attrs = list()\n for leader in point_leaders:\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n if player_name is None:\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n player_short_name = f\"{player_name[0]}. 
{' '.join(player_name.split()[1:])}\"\n point_leaders_with_attrs.append(player_short_name)\n\n point_leaders_joined = \", \".join(point_leaders_with_attrs[0:3])\n leftover_leaders = len(point_leaders) - 3\n points_leader_str = (\n f\"Points Leaders: {point_leaders_joined} & {leftover_leaders} others ({leader_points} each).\"\n )\n\n else:\n point_leaders_with_attrs = list()\n for leader in point_leaders:\n player_name = roster.player_attr_by_id(pref_team.roster, leader, \"fullName\")\n if player_name is None:\n player_id_only = leader.replace(\"ID\", \"\")\n player_name = roster.nonroster_player_attr_by_id(player_id_only, \"fullName\")\n player_goals = pref_goals[leader]\n player_assists = pref_assists[leader]\n player_short_name = f\"{player_name[0]}. {' '.join(player_name.split()[1:])}\"\n player_str = f\"{player_short_name} ({player_goals}G {player_assists}A)\"\n point_leaders_with_attrs.append(player_str)\n\n point_leaders_joined = (\n f\", \".join(point_leaders_with_attrs[:-1]) + f\" & {point_leaders_with_attrs[-1]}\"\n )\n points_leader_str = \"Points Leaders: {} with {} each.\".format(point_leaders_joined, leader_points)\n\n return season_series_str, points_leader_str, toi_leader_str", "def marcels_players(goalie, date, df):\n # 0 = that year, 1 is year b4 ....\n marcel_weights = [.36, .29, .21, .14]\n reg_const = 2000\n reg_avg = 0 # Where to regress to\n\n # Use past 3 season to weight games played -> Just take weighted average\n gp_weights = [8, 4, 2, 0]\n\n season = int(helpers.get_season(date))\n\n weighted_goals_sum, weighted_fen_sum, weighted_xg_sum, weights_marcel_sum = 0, 0, 0, 0\n weighted_gp_sum, weights_gp_sum = 0, 0\n\n # Past 4 Seasons\n for i in range(0, 4):\n if season - i > 2006:\n # Subset from stats df\n df_goalie = df[(df['player'] == goalie) & (df['season'] == (season - i))]\n\n # Sanity Check\n if df_goalie.shape[0] > 1:\n print(\"Too many rows!!!!!!!\")\n exit()\n\n # If he played that year\n if not df_goalie.empty:\n weighted_goals_sum += df_goalie.iloc[0]['goals_a'] * marcel_weights[i]\n weighted_fen_sum += df_goalie.iloc[0]['fenwick_a'] * marcel_weights[i]\n weighted_xg_sum += df_goalie.iloc[0]['xg_a'] * marcel_weights[i]\n weighted_gp_sum += df_goalie.iloc[0]['games'] * gp_weights[i]\n\n # -> To divide by at end...normalize everything\n weights_marcel_sum += marcel_weights[i]\n weights_gp_sum += gp_weights[i]\n\n # Normalize weighted sums\n weighted_xg_sum = weighted_xg_sum / weights_marcel_sum if weights_marcel_sum != 0 else 0\n weighted_goals_sum = weighted_goals_sum / weights_marcel_sum if weights_marcel_sum != 0 else 0\n weighted_fen_sum = weighted_fen_sum / weights_marcel_sum if weights_marcel_sum != 0 else 0\n\n # Get Regressed fsv%\n if weighted_fen_sum != 0:\n weighted_adj_fsv = ((1 - weighted_goals_sum / weighted_fen_sum) - (1 - weighted_xg_sum / weighted_fen_sum)) * 100\n else:\n weighted_adj_fsv = 0\n reg_adj_fsv = weighted_adj_fsv - ((weighted_adj_fsv - reg_avg) * (reg_const / (reg_const + weighted_fen_sum)))\n\n # Get weighted gp\n weighted_gp_sum = weighted_gp_sum / weights_gp_sum if weights_gp_sum != 0 else 0\n\n return {'fsv': reg_adj_fsv, 'gp': weighted_gp_sum}", "def perc_greedy(population, percentage=80):\n \n\n #initialization\n res_arr = [2] * 10\n total_knights = 80\n\n medians = get_medians(population, percentage);\n\n while(total_knights > 0):\n \n # find \"easiest\" to acheive\n ind = medians.index(min(medians))\n\n # calculate the number of knights to assign to that castle\n assign = min(total_knights, medians[ind]-res_arr[ind] + 
1)\n\n # make assignment\n res_arr[ind] += assign\n total_knights -= assign\n\n # mark that castle as \"done\"\n medians[ind] = 100\n \n # get the score of result inst against input population\n res_inst = CBInstance(res_arr)\n res_score = grade_inst(res_inst, population)\n \n return res_inst", "def games(self, competition_id: int, season_id: int) -> DataFrame[Any]:", "def save_games_copa(season, logging_level=logging.INFO):\n logging.basicConfig(level=logging_level)\n logger = logging.getLogger(__name__)\n\n logger.info('Starting the download of games...')\n\n if season.season == get_current_season():\n current_game_events_ids = season.get_current_game_events_ids_copa()\n game_ids_list = list(current_game_events_ids.values())\n else:\n game_ids_list=season.get_game_ids_copa()\n\n n_checkpoints = 4\n checkpoints = [round(i * float(len(game_ids_list)) / n_checkpoints) for i in range(n_checkpoints + 1)]\n for i in range(len(game_ids_list)):\n\n game_id=int(game_ids_list[i]) % 1000\n url2 = BASE_URL + \"/fichas/CREY{}.php\".format(game_ids_list[i])\n filename = os.path.join(season.GAMES_COPA_PATH, str(game_id)+\"-\" +str(game_ids_list[i]) + '.html')\n\n open_or_download(file_path=filename, url=url2)\n if i in checkpoints:\n logger.info('{}% already downloaded'.format(round(float(i * 100) / len(game_ids_list))))\n\n logger.info('Download finished! (new {} games in {})\\n'.format(len(game_ids_list), season.GAMES_COPA_PATH))", "def test_get_player_upcoming_chests(self):\n pass", "def strategy_cheap(cookies, cps, history, time_left, build_info):\n print\n print \"STRATEGY PART BEGIN\"\n print\n items_available = []\n for item in build_info.build_items():\n items_available.append(item)\n while items_available:\n min_cost = float('inf')\n for item in items_available:\n #print \"item:\", item, \", cost:\", build_info.get_cost(item)\n if build_info.get_cost(item) < min_cost:\n min_cost = build_info.get_cost(item)\n cheapest = item\n print \"cheapest:\", cheapest\n # check if time enough\n print \"checking time\"\n print \"time left:\", time_left\n print \"cost:\", min_cost\n print \"cookies can be produced:\", cps * time_left\n if cps * time_left + cookies < min_cost:\n print \"not enough,\"\n return None\n else:\n print cheapest, \"chosen\"\n print \"STRATEGY PART END\"\n print\n return cheapest", "def _name_champion(self):\n # TODO BREAK TIES\n return max(self.teams, key=lambda team: len(team.wins))", "def possessions_home_away(data_frame, mode):\n total_games_dict, total_dict, percentage_dict = dict(), dict(), dict()\n team_name = 'home_team' if mode == 'home' else 'away_team'\n for index, row in data_frame.iterrows():\n if row[team_name] not in total_games_dict:\n percentage_dict[row[\"id\"]] = 0\n else:\n percentage_dict[row[\"id\"]] = format(float(total_dict[row[team_name]]) / float(total_games_dict[row[team_name]]), '.2f')\n\n if row[team_name] in total_games_dict:\n total_games_dict[row[team_name]] += 1\n else:\n total_games_dict[row[team_name]] = 1\n\n fga, orb, fta, to = 'fg_made_attempted_', 'offensive_rebounds_', 'ft_made_attempted_', 'turnovers_'\n\n possessions = int(row[fga + team_name.split('_')[0]].split('-')[1]) - int(row[orb + team_name.split('_')[0]]) + \\\n (0.475 * int(row[fta + team_name.split('_')[0]].split('-')[1])) + int(row[to + team_name.split('_')[0]])\n\n if row[team_name] in total_dict:\n total_dict[row[team_name]] += possessions\n else:\n total_dict[row[team_name]] = possessions\n\n return percentage_dict", "def mk_champs(cls, year, start_date, end_date):\n seasons = 
[\"Quad Quandary\", \"Face Off\", \"Hot Shot\", \"Get Over It\", \"Bowled Over\", \"Ring It Up\", \"Block Party\",\n \"Cascade Effect\", \"RES-Q\", \"Velocity Vortex\", \"Relic Recovery\", \"Rover Ruckus\"]\n season_name = seasons[year-2007]\n start_date = datetime.datetime.strptime(start_date, \"%Y-%m-%d\")\n end_date = datetime.datetime.strptime(end_date, \"%Y-%m-%d\")\n season = f\"{year % 100:02}{(year + 1) % 100:02}\"\n # fyear = f\"{year}-{(year+1)%1000:02d}\"\n if year == 2009:\n city, state_prov, country = \"Atlanta\", \"Georgia\", \"USA\"\n venue = \"Georgia Dome\"\n address = \"1 Georgia Dome Dr, Atlanta, GA 30313\"\n elif year < 2013:\n city, state_prov, country = \"St. Louis\", \"Missouri\", \"USA\"\n venue = \"Edward Jones Dome\"\n address = \"701 Convention Plaza, St. Louis, MO 63101\"\n else:\n city, state_prov, country = \"St. Louis\", \"Missouri\", \"USA\"\n venue = \"Union Station\"\n address = \"1820 Market Street, St. Louis, MO 63103\"\n shared = {\n \"year\": year,\n \"city\": city,\n \"state_prov\": state_prov,\n \"country\": country,\n \"end_date\": end_date,\n \"event_type\": EventType.WORLD_CHAMPIONSHIP,\n \"venue\": venue,\n \"address\": address,\n \"data_sources\": [\"USFIRST Website Archives\"]\n }\n\n finals = Event(key=f\"{season}cmp0\",\n name=f\"FTC {season_name} World Championship - Finals\",\n playoff_type=PlayoffType.BO3_FINALS, \n division_keys=[f\"{season}cmp1\", f\"{season}cmp2\"],\n start_date=end_date,\n **shared)\n franklin = Event(key=f\"{season}cmp{2}\",\n name=f\"FTC {season_name} World Championship - Franklin Division\",\n playoff_type=PlayoffType.STANDARD, \n parent_event_key=f\"{season}cmp0\", \n start_date=start_date,\n **shared)\n edison = Event(key=f\"{season}cmp{1}\",\n name=f\"FTC {season_name} World Championship - Edison Division\",\n playoff_type=PlayoffType.STANDARD,\n parent_event_key=f\"{season}cmp0\", \n start_date=start_date,\n **shared)\n return (franklin, edison, finals)", "def cumulative_stats_for_teams_each_year(self):\n self.cumulative_stats_for_team_each_year = (\n self.combine_both_winning_losing_games_stats\n .sort_values(['WTeamID','Season'])\n .groupby(['WTeamID'])\n .cumsum()\n .pipe(lambda x:x.assign(Season = self.combine_both_winning_losing_games_stats.Season.values))\n .pipe(lambda x:x.assign(TeamID = self.combine_both_winning_losing_games_stats.WTeamID.values))\n .drop(['LTeamID','win_rate'],1)\n .pipe(lambda x:x.assign(win_rate = x.winning_num_counts/(x.winning_num_counts + x.losing_num_counts)))\n .pipe(lambda x:x.assign(WFGP = x.WFGM/x.WFGA))\n .pipe(lambda x:x.assign(WFG3P = x.WFGM3/x.WFGA3))\n .pipe(lambda x:x.assign(WFTP = x.WFTM/x.WFTA))\n .pipe(lambda x:x.assign(LFGP = x.LFGM/x.LFGA))\n .pipe(lambda x:x.assign(LFG3P = x.LFGM3/x.LFGA3))\n .pipe(lambda x:x.assign(LFTP = x.LFTM/x.LFTA))\n .pipe(lambda x:x.assign(fgp = x.total_fgm/x.total_fga))\n .pipe(lambda x:x.assign(fg3p = x.total_fg3m/x.total_fg3a))\n .pipe(lambda x:x.assign(ftp = x.total_ftm/x.total_fta))\n # rebounds cumsum stats\n .pipe(lambda x:x.assign(total_def_rebounds_percent = x.total_def_rebounds/x.total_rebounds))\n .pipe(lambda x:x.assign(total_off_rebounds_percent = x.total_off_rebounds/x.total_rebounds))\n .pipe(lambda x:x.assign(total_rebound_possession_percent = x.total_off_rebounds/x.total_team_missed_attempts))\n .pipe(lambda x:x.assign(total_rebound_possessiongain_percent = x.total_def_rebounds/x.total_opp_team_missed_attempts))\n # assists, turnovers, steals, blocks and personal fouls\n .pipe(lambda x:x.assign(total_block_opp_FGA_percent 
= x.total_blocks/x.total_opp_fga))\n .pipe(lambda x:x.assign(total_assist_per_fgm = x.total_assists/x.total_fgm))\n .pipe(lambda x:x.assign(total_assist_turnover_ratio = x.total_assists/x.total_turnover))\n # win or lose by how many points\n .pipe(lambda x:x.assign(lose_rate = 1-x.win_rate))\n .pipe(lambda x:x.assign(win_score_by = x.WScore - x.losing_opponent_score))\n .pipe(lambda x:x.assign(lose_score_by = x.LScore - x.winning_opponent_score))\n .pipe(lambda x:x.assign(expectation_per_game = x.win_rate * x.win_score_by/x.winning_num_counts + x.lose_rate * x.lose_score_by/x.losing_num_counts))\n .pipe(lambda x:x.assign(avg_win_score_by = x.win_score_by/x.winning_num_counts))\n .pipe(lambda x:x.assign(avg_lose_score_by = x.lose_score_by/x.losing_num_counts))\n )", "def comp101_game(points, server):\n \n player0_points = 0 # sets initial 'points' of both players\n player1_points = 0 \n final0_score = 0 # final 'score' of both players in a manner peculiar to\n final1_score = 0 # tennis\n remainder = [] # stores the remaining 'points' if the game has ended\n tennis_score = {0: 0, 1: 15, 2: 30, 3: 40, 4: 40} # use to convert\n # 'points' to tennis\n # 'scores'\n winner = None # initial winner of the game\n \n # tests every 'points' in 'points'\n for number in points:\n \n # finds the 'point' differences between both players and make\n # sure it is a positive value\n points_diff = abs(player0_points - player1_points)\n \n if (player0_points >= 4 or player1_points >= 4):\n \n # the case when a 'winner' is found and stores the \n # remaining 'points'\n if points_diff >= 2:\n if player0_points > player1_points:\n winner = 0\n final0_score = \"W\"\n \n else:\n winner = 1\n final1_score = \"W\"\n remainder.append(number)\n \n # the case when there is no 'winner' yet \n else:\n \n if number == 0:\n player0_points += 1\n\n else:\n player1_points += 1\n \n # updates the latest 'point' difference\n points_diff = abs(player0_points - player1_points)\n \n # ONLY runs if a player 'won' the game after exactly getting \n # his next 'point'\n if points_diff >= 2:\n \n if player0_points > player1_points:\n winner = 0\n final0_score = \"W\"\n \n else:\n winner = 1\n final1_score = \"W\"\n \n # if one of the player gets an \"advantage\"\n elif points_diff == 1:\n \n if player0_points > player1_points:\n final0_score = \"Ad\"\n final1_score = 40\n else:\n final0_score = 40\n final1_score = \"Ad\"\n \n # if no players get an \"advantage\" or 'wins' the game\n else:\n final0_score = 40\n final1_score = 40\n \n else:\n \n # adds a 'point' to a 'player' and converts player 'points' to \n # 'scores' in a manner peculiar to tennis\n if number == 0:\n player0_points += 1\n final0_score = tennis_score[player0_points]\n \n else:\n player1_points += 1\n final1_score = tennis_score[player1_points]\n \n # updates the latest score difference\n points_diff = abs(player0_points - player1_points)\n \n # checks if a player gets an \"advantage\" / 'wins' the game at exactly \n # his 4th 'point'\n if (player0_points == 4 or player1_points == 4):\n \n # when a player 'won' the game\n if points_diff >= 2:\n \n if player0_points > player1_points:\n winner = 0\n final0_score = \"W\"\n else:\n winner = 1\n final1_score = \"W\"\n \n # when a player gets an \"advantage\"\n elif points_diff == 1:\n \n if player0_points > player1_points:\n final0_score = \"Ad\"\n else:\n final1_score = \"Ad\" \n \n # determines which player score is displayed first based on 'server'\n if server == 0:\n score = str(final0_score) + \"-\" + 
str(final1_score)\n else:\n score = str(final1_score) + \"-\" + str(final0_score)\n \n return (score, winner, remainder)", "def qualifiedteams(self):\n for i in range(0, len(self.teams)):\n for j in range(i + 1, len(self.teams)):\n WorldCupMatch(self.teams[i], self.teams[j], True)\n self.teams = sorted(self.teams, key=op.attrgetter('points', 'goaldifference', 'scored'))\n self.first_qualified = self.teams[len(self.teams)-1]\n self.second_qualified = self.teams[len(self.teams)-2]", "def review(self):\n # Compile standings\n self.standings = COMPOSE_LEAGUE_STANDINGS(season=self)\n # Name a champion\n self.champion = self._name_champion()\n self.league.history.champions_timeline[self.year] = self.champion\n print \"THE {} HAVE WON THE {} {} CHAMPIONSHIP!\".format(\n self.champion.team.name.upper(), self.year, self.league.name.upper()\n )\n # Compile league leaders\n # self.league_leaders = COMPOSE_LEAGUE_LEADERS(season=self)\n # Have each team review its season, as well\n for team_season in self.teams:\n team_season.review()\n # Send the league into the offseason\n self.league.season = None", "def get_game(self, game_id):\n \n session = requests.session()\n response = session.get(self.baseURL + str(game_id), headers=self.headers)\n soup = BeautifulSoup(response.text)\n \n #get teams\n defeated_by = False \n game_header = soup.find_all(text=re.compile('defeats'))\n \n if len(game_header) == 0:\n game_header = soup.find_all(text=re.compile('defeated by'))\n \n if (len(game_header)) == 0:\n game_header = soup.find_all(text=re.compile('defeat'))\n \n if (len(game_header)) == 0:\n game_header = soup.find_all(text=re.compile('drew'))\n defeated_by = True \n else:\n defeated_by = True \n\n if defeated_by: \n teams = self.remove_long_names(game_header[1]).replace('\\n', '')\n home_team = teams.split(' ')[0]\n away_team = teams.split(' ')[3]\n else:\n teams = self.remove_long_names(game_header[1]).replace('\\n', '')\n home_team = teams.split(' ')[0]\n away_team = teams.split(' ')[2]\n \n date_string = game_header[0].split(' ')\n date_string_find = [date.lower() for date in date_string]\n \n venue = date_string[date_string_find.index('at') + 1]\n \n #get round\n round_num = None\n \n try:\n date_string_find.remove('')\n except:\n pass\n \n try:\n round_num = int(date_string[date_string_find.index('round') + 1])\n except:\n try:\n round_num = date_string_find[date_string_find.index('final') - 1] + ' final'\n except:\n round_num = date_string_find[date_string_find.index('semi-final')]\n \n date = date_string[-3:]\n date = ' '.join(date) \n date = parser.parse(date)\n \n #get attendance\n attend = soup.find_all(text=re.compile('Attendance'))\n attendance = 0\n \n if (len(attend) > 3):\n attendance = int(attend[1].split(' ')[-1])\n \n #get stats \n away_stats = {}\n home_stats = {}\n \n for stat in stats:\n stat_row = soup.find_all('td', text=stat)[0].find_parent('tr')\n elements = stat_row.find_all('td')\n \n if elements[0].text == '-':\n home_stats[stat] = None\n else:\n home_stats[stat] = elements[0].text\n \n if elements[0].text == '-':\n away_stats[stat] = None\n else:\n away_stats[stat] = elements[2].text\n \n return Game(game_id, home_team, away_team, venue, round_num, date, attendance, home_stats, away_stats)", "def build_home_advantage(matches):\n D = {}\n df_goal = matches[[\"season\", \"home_team_goal\", \"away_team_goal\"]]\n\n for i in range(len(df_goal)):\n key_season = df_goal.iloc[i].season\n if key_season not in D:\n D[key_season] = [\n 1,\n df_goal.iloc[i].home_team_goal,\n 
df_goal.iloc[i].away_team_goal,\n ]\n else:\n D[key_season][0] += 1\n D[key_season][1] += df_goal.iloc[i].home_team_goal\n D[key_season][2] += df_goal.iloc[i].away_team_goal\n\n for key in D:\n D[key][1] /= D[key][0]\n D[key][2] /= D[key][0]\n\n df_goal_info = pd.DataFrame(D)\n column_headers = list(df_goal_info.columns.values)\n\n fig, ax = plt.subplots(nrows=1, ncols=1)\n fig.set_size_inches(w=7, h=4)\n plt.plot(column_headers, df_goal_info.iloc[1], label=\"avg_home_goal\")\n plt.plot(column_headers, df_goal_info.iloc[2], label=\"avg_away_goal\")\n pl.xticks(rotation=270)\n plt.xlabel(\"Season\")\n plt.ylabel(\"Average Goal\")\n plt.legend()\n plt.show()", "def find_new_contests(sport):\n\n # def get_pst_from_timestamp(timestamp_str):\n # timestamp = float(re.findall(\"[^\\d]*(\\d+)[^\\d]*\", timestamp_str)[0])\n # return datetime.datetime.fromtimestamp(\n # timestamp / 1000, timezone(\"America/Los_Angeles\")\n # )\n\n url = f\"https://www.draftkings.com/lobby/getcontests?sport={sport}\"\n\n # response = requests.get(url, headers=HEADERS, cookies=COOKIES).json()\n response_contests = get_contests(url)\n\n # create list of Contest objects\n contests = [Contest(c) for c in response_contests]\n # contests = [\n # get_largest_contest(response[\"Contests\"], 3),\n # get_largest_contest(response[\"Contests\"], 0.25),\n # get_largest_contest(response[\"Contests\"], 27),\n # ] + get_contests_by_entries(response[\"Contests\"], 3, 50000)\n target_contests = []\n entry_fees = []\n if sport == \"NFL\":\n entry_fees = [5, 10, 25, 50]\n else:\n entry_fees = [10, 25]\n\n for entry_fee in entry_fees:\n largest_contest = get_largest_contest(contests, entry_fee=entry_fee)\n # check if largest_contest is None\n if largest_contest is not None:\n logger.debug(\"Appending contest %s\", largest_contest)\n target_contests.append(largest_contest)\n\n for contest in target_contests:\n date_time = contest.start_dt\n # make naive datetime aware based on django settings\n aware_datetime = make_aware(date_time)\n dkcontest, created = DKContest.objects.update_or_create(\n dk_id=contest.id,\n defaults={\n \"date\": aware_datetime.date(),\n \"datetime\": aware_datetime,\n \"sport\": sport,\n \"name\": contest.name,\n \"draft_group_id\": contest.draft_group,\n \"total_prizes\": contest.total_prizes,\n \"entries\": contest.entries,\n \"entry_fee\": contest.entry_fee,\n },\n )\n if created:\n logger.info(\"Creating DKContest %s\", dkcontest)", "def get_league_goalie_boxcars(league, seasons):\n\n if len(set(seasons))==1:\n scraped_season_list = str(seasons)\n elif len(set(seasons))>2:\n scraped_season_list = str(((str(tuple(sorted(tuple(set(seasons))))).replace(\"'\", \"\").replace(\"(\", \"\").replace(\")\", \"\").replace(\"[\", \"\").replace(\"]\", \"\"))).split(\", \")[:-1]).replace(\"'\", \"\").replace(\"[\", \"\").replace(\"]\", \"\") + \", and \" + str(((str(tuple(sorted(tuple(set(seasons))))).replace(\"'\", \"\").replace(\"(\", \"\").replace(\")\", \"\").replace(\"[\", \"\").replace(\"]\", \"\"))).split(\", \")[-1])\n else:\n scraped_season_list = str(((str(tuple(sorted(tuple(set(seasons))))).replace(\"'\", \"\").replace(\"(\", \"\").replace(\")\", \"\").replace(\"[\", \"\").replace(\"]\", \"\"))).split(\", \")[:-1]).replace(\"'\", \"\").replace(\"[\", \"\").replace(\"]\", \"\") + \" and \" + str(((str(tuple(sorted(tuple(set(seasons))))).replace(\"'\", \"\").replace(\"(\", \"\").replace(\")\", \"\").replace(\"[\", \"\").replace(\"]\", \"\"))).split(\", \")[-1])\n \n \n global hidden_patrick\n hidden_patrick = 0\n 
global error\n error = 0\n \n output = pd.DataFrame()\n \n if type(seasons) == str:\n single = getgoalies(league, seasons)\n output = output.append(single)\n print(\"Scraping \" + league + \" data is complete. You scraped goalie data from \" + seasons + \".\")\n return(output)\n \n elif ((type(seasons) == tuple) or (type(seasons) == list)):\n \n for i in range(0, len(seasons)):\n try:\n single = getgoalies(league, seasons[i])\n output = output.append(single)\n except KeyboardInterrupt as e:\n hidden_patrick = 4\n error = e\n return output\n except (ConnectionError,\n HTTPError,\n ReadTimeout,\n ConnectTimeout) as e:\n hidden_patrick = 5\n error = e\n return output\n \n print(\"Scraping \" + league + \" data is complete. You scraped goalie data from \" + scraped_season_list + \".\") \n return(output)", "def season_game_logs(team, year):\n\n # Check year value\n if year > 2019 or year < 1950:\n raise ValueError('Year Value Incorrect')\n\n # Rename teams that moved\n team = scrape_utils.rename_team(team, year)\n\n # Get HTML content\n url = 'http://www.basketball-reference.com/teams/%s/%s/gamelog' % (team, year)\n r = requests.get(url)\n soup = BeautifulSoup(r.content, \"html.parser\")\n season_stats = soup.find(id='tgl_basic')\n games = season_stats.find('tbody')\n\n # MongoDB Collection\n m = mongo.Mongo()\n\n # To find opponent statistics\n opponent = re.compile('^opp_.*$')\n\n # Loop through every game in a team's season\n for game in games.find_all('tr', {'class': None}):\n\n curr_team = {'team': team}\n opp_team = {}\n\n # Loop through each stat\n for stat in game.find_all('td'):\n\n stat_name = stat['data-stat']\n\n # These are opponent stats\n if re.match(opponent, stat_name):\n opp_team[stat_name[4:]] = scrape_utils.stat_parse(stat_name, stat.string)\n else:\n curr_team[stat_name] = scrape_utils.stat_parse(stat_name, stat.string)\n\n # Remove unnecessary information\n del curr_team['game_season']\n del curr_team['x']\n\n # Rename relocated teams\n curr_team['team'] = scrape_utils.rename_team(team)\n opp_team['team'] = scrape_utils.rename_team(opp_team.pop('id'))\n\n # Use the same ID as basketball reference\n result = {'date': datetime.strptime(curr_team.pop('date_game'), \"%Y-%m-%d\"),\n 'season': year,\n 'result': scrape_utils.determine_home_win(curr_team['game_location'], curr_team.pop('game_result')),\n '_id': game.find('a')['href'][-17:-5]}\n\n # Place the teams in the correct spot depending on who is the home team\n if curr_team.pop('game_location') == 0:\n result['home'] = curr_team\n result['away'] = opp_team\n else:\n result['home'] = opp_team\n result['away'] = curr_team\n\n # Insert into database\n m.insert('game_log', result)", "def Champs(self, summoner):\n encrypted_summoner_id = summoner.get('encrypted_summoner_id', '')\n region = summoner.get('region', DEFAULT_REGION)\n r = self._rito.ListChampionMasteries(region, encrypted_summoner_id)\n if r:\n logging.info('Got champ mastery data for %s/%s [%s]', region,\n encrypted_summoner_id, summoner['summoner'])\n # Calculate total number of chests received\n total_chests = sum(1 for x in r.champion_masteries if x.chest_granted)\n\n top_champs = []\n for champ in r.champion_masteries[:3]:\n top_champs.append(self._game.champion_id_to_name[str(\n champ.champion_id)])\n top_champ_lvl = r.champion_masteries[0].champion_level\n\n chest_verb = ''\n chest_verb_dict = {\n (0, 2): 'receiving',\n (2, 4): 'collecting',\n (4, 8): 'earning',\n (8, 16): 'amassing',\n (16, 32): 'hoarding'\n }\n for range_spec, verb in 
chest_verb_dict.items():\n if total_chests in range(*range_spec):\n chest_verb = verb\n break\n\n if chest_verb:\n chest_str = '%s %s' % (chest_verb,\n inflect_lib.Plural(total_chests, 'chest'))\n else:\n chest_str = 'with a boatload of chests (%d)' % total_chests\n\n return (u'{0} is a L{1} {2[0]} main, but sometimes likes to play {2[1]} '\n 'and {2[2]}, {3} this season.').format(summoner['summoner'],\n top_champ_lvl, top_champs,\n chest_str)" ]
[ "0.69078004", "0.66235226", "0.65739715", "0.64645517", "0.54283714", "0.53823304", "0.5381159", "0.5307479", "0.5282068", "0.52351725", "0.51759636", "0.51322395", "0.5129283", "0.50992453", "0.5090998", "0.5084592", "0.5073128", "0.5072545", "0.5070027", "0.50515336", "0.5047289", "0.50461006", "0.503849", "0.50384164", "0.5027115", "0.5018327", "0.50121963", "0.49724916", "0.4972198", "0.496628" ]
0.757074
0
Get teams who won the championship for each year
def get_championship_won_for_each_coach(self):
    self.championship_team = (
        self.raw_data_postseason
        .merge(self.season_max_days,how='left',on=['Season'])
        .query("DayNum == season_max_days")
        .groupby(['Season','WTeamID'])
        .agg({"NumOT":"count"})
        .reset_index()
        .rename(columns={"NumOT":"is_champion","WTeamID":"TeamID"})
    )
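Aside, not part of the dataset record above: the method yields one (Season, TeamID) row per champion plus an is_champion count. A minimal sketch of how that flag might be joined onto a per-season coach table follows; the coach_per_season frame and the helper name are assumptions introduced here purely for illustration.

# Hypothetical helper (assumption): merge the championship flag onto an assumed
# coach_per_season frame that carries Season, TeamID and CoachName columns.
def flag_championship_coaches(championship_team, coach_per_season):
    return (
        coach_per_season
        .merge(championship_team, how='left', on=['Season', 'TeamID'])
        # coach seasons whose team never won the title get is_champion = 0
        .assign(is_champion=lambda x: x.is_champion.fillna(0).astype(int))
    )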
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_player_games(self, year, use_local=True):", "def collect_teams(year):\n\n team_list = Team.objects.filter(year=year).order_by('location')\n teams = []\n for t in team_list:\n team = {\n 'id': t.abbreviation,\n 'team': t,\n }\n teams.append(team)\n return teams", "def get_teams():", "def collect_teams(year: int = 2005) -> None:\n\n\twith open('../resources/config.json') as config_file, open('../resources/secrets.json') as secrets_file:\n\t\tconfig_json = json.load(config_file)\n\t\tsecrets_json = json.load(secrets_file)\n\n\t\turl = '/'.join(['http:', '', config_json['base_url'], config_json['fbs_teams_endpoint']])\n\t\tapi_key = secrets_json['api_key']\n\n\theaders = {'Authorization': api_key}\n\tparams = {'year': year}\n\n\tresponse = requests.get(url, headers = headers, params = params).json()\n\n\t# dict of one array for json dump\n\tteam_names = {'teamNames': list(map(lambda r: r['school'], response))}\n\n\twith open('../resources/teams.json', 'w') as teams_file:\n\t\tjson.dump(team_names, teams_file)", "def get_all_cfb_teams(year):\n \n url = BASE_URL + '/cfb/years/{}-standings.html'.format(year)\n soup = get_soup(url)\n \n table = soup.find('table', attrs={'id':'standings'})\n rows = table.find_all('tr')\n \n team_list = []\n \n for row in rows:\n team = row.find('td', attrs={'data-stat':'school_name'})\n \n if not team:\n continue\n \n team_link = team.find('a').get('href') \n team_list.append(team_link)\n \n return team_list", "def wins(self):\n return [g for g in self.games if g.winner is self.team]", "def test_get_teams_in_year_names():\n assert sorted(gtiy(2008)) == sorted(team_2008)\n assert sorted(gtiy(2009)) == sorted(team_2009)\n assert sorted(gtiy(2010)) == sorted(team_2010)\n assert sorted(gtiy(2011)) == sorted(team_2011)\n assert sorted(gtiy(2012)) == sorted(team_2012)\n assert sorted(gtiy(2013)) == sorted(team_2013)\n assert sorted(gtiy(2014)) == sorted(team_2014)\n assert sorted(gtiy(2015)) == sorted(team_2015)\n assert sorted(gtiy(2016)) == sorted(team_2016)\n assert sorted(gtiy(2017)) == sorted(team_2017)", "def non_current_championships():\n current_championships = (Alfa_Romeo+Ferrari+Haas+McLaren+Mercedes+Racing_Point+Red_Bull+Renault+Toro_Rosso+Williams).constructors_championships_years\n non_current_championships = []\n year = 1958\n while year < 2020:\n if year not in current_championships:\n non_current_championships.append(year)\n year += 1\n return f\"The F1 Constructors' Championships won by teams no longer on the grid are: \\n{non_current_championships}\"", "def test_get_teams_in_year_len():\n assert len(gtiy(2008)) == 8\n assert len(gtiy(2009)) == 8\n assert len(gtiy(2010)) == 8\n assert len(gtiy(2011)) == 10\n assert len(gtiy(2012)) == 9\n assert len(gtiy(2013)) == 9\n assert len(gtiy(2014)) == 8\n assert len(gtiy(2015)) == 8\n assert len(gtiy(2016)) == 8\n assert len(gtiy(2017)) == 8", "def get_all_cfb_teams_for_year_range(start_year, end_year):\n \n full_team_list = []\n \n for year in range(start_year, end_year + 1):\n print(year)\n \n team_list = get_all_cfb_teams(year)\n full_team_list += team_list\n \n wait_time = random.randint(3, 15)\n time.sleep(wait_time)\n \n return full_team_list", "def get_player_stats_from_game(team, year, week):", "def get_companies_and_people(team):", "def teams(self):\n t = [e[0] for e in self.pick_set.all().values_list(\"team\")]\n return Team.objects.filter(id__in=set(t))", "def get_teams_from_league(teams, league):\n\t\n\tteams_1 = []\n\tfor i in range(0,len(teams)):\n\t\tif teams[i].league == 
1:\n\t\t\tteams_1.append(teams[i])\n\n\treturn teams_1", "def get_available_companies(team):", "def get_available_companies_and_people(team):", "def getTeamStat(self, year = 2014):\r\n \r\n year_next = (year % 100) + 1\r\n season = str(year) + '-' + str(year_next)\r\n \r\n stat_url = 'http://stats.nba.com/stats/leaguedashteamstats?Conference=&'\\\r\n 'DateFrom=&DateTo=&Division=&GameScope=&GameSegment=&'\\\r\n 'LastNGames=0&LeagueID=00&Location=&MeasureType=Base&'\\\r\n 'Month=0&OpponentTeamID=0&Outcome=&PORound=0&PaceAdjust=N&'\\\r\n 'PerMode=PerGame&Period=0&PlayerExperience=&PlayerPosition=&'\\\r\n 'PlusMinus=N&Rank=N&Season=' + season + '&SeasonSegment=&'\\\r\n 'SeasonType=Regular+Season&ShotClockRange=&StarterBench=&'\\\r\n 'TeamID=0&VsConference=&VsDivision='\r\n \r\n response = requests.get(stat_url)\r\n data = json.loads(response.text)\r\n \r\n headers = data['resultSets'][0]['headers']\r\n stat_data = data['resultSets'][0]['rowSet']\r\n df = pd.DataFrame(stat_data,columns=headers) \r\n \r\n team_df = df[[\"TEAM_ID\",\"TEAM_NAME\",\"GP\",\"W\",\"L\",\"W_PCT\",\"MIN\",\"FGM\",\r\n \"FGA\",\"FG_PCT\",\"FG3M\",\"FG3A\",\"FG3_PCT\",\"FTM\",\"FTA\",\"FT_PCT\",\r\n \"OREB\",\"DREB\",\"REB\",\"AST\",\"TOV\",\"STL\",\"BLK\",\"BLKA\",\"PF\",\r\n \"PFD\",\"PTS\",\"PLUS_MINUS\"]]\r\n \r\n return team_df", "def winning_games_stats(self):\n self.winning_games_up_to_2013 = (\n self.df\n .pipe(lambda x:x.assign(winning_num_counts = 1))\n .query(\"Season <= 2013\")\n .groupby(['Season','WTeamID'])\n .agg({\"WScore\":\"sum\",\"WFGM\":\"sum\",\"WFGA\":\"sum\",\"WFGM3\":\"sum\",\"WFGA3\":\"sum\",\"WFTM\":\"sum\",\"WFTA\":\"sum\",\"LScore\":\"sum\",\"winning_num_counts\":\"sum\",\n \"WOR\":\"sum\",\"WDR\":\"sum\",\"LFGM\":\"sum\",\"LFGA\":\"sum\",\n \"WAst\":\"sum\",\"WTO\":\"sum\",\"WStl\":\"sum\",\"WBlk\":\"sum\",\"WPF\":\"sum\"})\n .reset_index()\n .rename(columns={\"LScore\":\"losing_opponent_score\"})\n # rebounds\n .pipe(lambda x:x.assign(total_winning_rebounds = x.WOR + x.WDR))\n .pipe(lambda x:x.assign(winning_off_rebounds_percent = x.WOR/x.total_winning_rebounds))\n .pipe(lambda x:x.assign(winning_def_rebounds_percent = x.WDR/x.total_winning_rebounds))\n .pipe(lambda x:x.assign(team_missed_attempts = x.WFGA - x.WFGM))\n .pipe(lambda x:x.assign(opp_team_missed_attempts = x.LFGA - x.LFGM))\n .pipe(lambda x:x.assign(winning_rebound_possession_percent = x.WOR/x.team_missed_attempts))\n .pipe(lambda x:x.assign(winning_rebound_possessiongain_percent = x.WDR/x.opp_team_missed_attempts))\n # blocks, steals, assists and turnovers\n .pipe(lambda x:x.assign(winning_block_opp_FGA_percent = x.WBlk/x.LFGA))\n .pipe(lambda x:x.assign(winning_assist_per_fgm = x.WAst/x.WFGM))\n .pipe(lambda x:x.assign(winning_assist_turnover_ratio = x.WAst/x.WTO))\n # rename columns to prevent duplication when joining with losing stats. 
example: WFGM_x\n .rename(columns={\"LFGA\":\"LFGA_opp\",\"LFGM\":\"LFGM_opp\"})\n )", "def get_team_names(self, year1, year2):\n\n base_url = 'http://www.sports-reference.com/cbb/schools/'\n response = urllib2.urlopen(base_url)\n content = response.read()\n soup = BeautifulSoup(content, 'html.parser')\n soup_results = soup.findAll('tr', {'class':''})\n extract_name = lambda name: name.split('/')[3]\n team_names = []\n \n for result in soup_results[1::]:\n year_span = result.findAll('td', {'align':'center'}) \n year_span = map(int, [year.string for year in year_span])\n\n if year_span[0] <= year1 and year_span[1] >= year2:\n team_name = result.find('a', href = True).get('href')\n team_name = extract_name(team_name)\n team_names.append(str(team_name))\n\n self.team_names = team_names", "def scrape_all_world_cup_games():\n\n def scrape_scores_year(year):\n urls = scrape_world_cup_scoreboard(year)\n scores = [scrape_fifa_game(url, 'FIFA World Cup') for url in urls]\n return scores\n\n l = []\n for year in sorted(world_cup_mapping.keys()):\n l.extend(scrape_scores_year(year))\n return l", "def season_games(year):\n\tLOG.debug('Getting season %d', year)\n\tdata = read_html(io=season_games_url(year),\n\t\t\t\t\t attrs={'id': 'games'},\n\t\t\t\t\t infer_types=False,\n\t\t\t\t\t header=0)\n\tif len(data) != 1:\n\t\traise CantFindTheRightTable\n\tdata = data.pop()\n\n\t# Cleaning.\n\tdel data[\"Unnamed: 3\"]\n\t# The code below issues \"UserWarning: \" So we catch UserWarnings.\n\twith warnings.catch_warnings():\n\t\twarnings.filterwarnings(action='ignore', category=UserWarning,\n\t\t\t\t\t\t\t\tmodule=r'pandas\\.core\\.frame',\n\t\t\t\t\t\t\t\tmessage=(r\"Boolean Series key will be reindexed\"\n\t\t\t\t\t\t\t\t\t\t r\" to match DataFrame index\\.\"))\n\t\t# These rows are mid-table header rows.\n\t\tdata = data[data.Week != \"Week\"][data.Week != \"nan\"]\n\n\tdata['week'] = (data.Week\n\t\t\t\t\t.replace(\"WildCard\", \"wild-card\")\n\t\t\t\t\t.replace(\"Division\", \"divisional\")\n\t\t\t\t\t.replace(\"ConfChamp\", \"conference\")\n\t\t\t\t\t.replace(\"SuperBowl\", \"super-bowl\")\n\t\t\t\t\t.apply(\n\t\t\t\t\t\tlambda s: (int(s)\n\t\t\t\t\t\t\t\t if all(c in '1234567890' for c in s)\n\t\t\t\t\t\t\t\t else s)))\n\tdel data['Week']\n\n\tdata['season'] = year\n\tdata['game_date'] = pd.to_datetime(\n\t\tdata.Date\n\t\t.replace(r\"$\", r\", %d\" % year, regex=True)\n\t\t.replace(r\"^(January|February) (\\d+), \\d+$\", r\"\\1 \\2, %d\" % (year + 1),\n\t\t\t\t regex=True))\n\tdel data['Date']\n\n\tfor column in \"PtsW\", \"PtsL\", \"YdsW\", \"TOW\", \"YdsL\", \"TOL\":\n\t data[column] = data[column].apply(int)\n\n\tdata['WatL'] = data['Unnamed: 5'].apply(lambda x: x == '@')\n\tdel data['Unnamed: 5']\n\tdata['hometeam'] = (~data.WatL * data['Winner/tie'] +\n\t\t\t\t\t\tdata.WatL * data['Loser/tie'])\n\tdata['awayteam'] = (data.WatL * data['Winner/tie'] +\n\t\t\t\t\t\t~data.WatL * data['Loser/tie'])\n\tdata['winner'] = data['Winner/tie']\n\tfor column in 'Winner/tie', 'Loser/tie', \"WatL\":\n\t\tdel data[column]\n\tfor column in 'hometeam', 'awayteam', 'winner':\n\t\tdata[column] = data[column].apply(lambda s: s.split()[-1].lower())\n\n\treturn data", "def get_win_rate_regular_season_for_each_coach(self):\n self.games_won_for_coaches = (\n self.raw_data_regularseason\n [['Season','DayNum','WTeamID']]\n # merge for winning team\n .merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],\n how='left',left_on=['Season','WTeamID'],right_on=['Season','TeamID'])\n 
.rename(columns={\"FirstDayNum\":\"FirstDayNum_win\",\"LastDayNum\":\"LastDayNum_win\",\"CoachName\":\"CoachName_win\",\"TeamID\":\"TeamID_win\"})\n .pipe(lambda x:x.assign(which_coach_for_win = np.where((x.FirstDayNum_win <= x.DayNum) & (x.LastDayNum_win >= x.DayNum),1,0)))\n .query(\"which_coach_for_win != 0\")\n .groupby(['Season','CoachName_win','WTeamID'])\n .agg({\"which_coach_for_win\":\"sum\"})\n .reset_index()\n )\n\n self.games_lose_for_coaches = (\n self.raw_data_regularseason\n [['Season','DayNum','LTeamID']]\n # merge for losing team\n .merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],\n how='left',left_on=['Season','LTeamID'],right_on=['Season','TeamID'])\n .rename(columns={\"FirstDayNum\":\"FirstDayNum_lose\",\"LastDayNum\":\"LastDayNum_lose\",\"CoachName\":\"CoachName_lose\",\"TeamID\":\"TeamID_lose\"})\n .pipe(lambda x:x.assign(which_coach_for_lose = np.where((x.FirstDayNum_lose <= x.DayNum) & (x.LastDayNum_lose >= x.DayNum),1,0)))\n .query(\"which_coach_for_lose != 0\")\n .groupby(['Season','CoachName_lose','LTeamID'])\n .agg({\"which_coach_for_lose\":\"sum\"})\n .reset_index()\n )\n\n # combine games won and lost df\n self.combine_regular_games_won_lose = (\n self.games_lose_for_coaches\n .merge(self.games_won_for_coaches,how='left',left_on=['Season','LTeamID','CoachName_lose'],right_on=['Season','WTeamID','CoachName_win'])\n .pipe(lambda x:x.assign(win_rate_regular = x.which_coach_for_win/(x.which_coach_for_win + x.which_coach_for_lose)))\n .drop(['CoachName_win','WTeamID'],1)\n .rename(columns={\"CoachName_lose\":\"CoachName\",\"LTeamID\":\"TeamID\",\"which_coach_for_lose\":\"games_lost\",\"which_coach_for_win\":\"games_won\"})\n )", "def get_leagues_and_countries(source=utils.get_native_source):\n if not isinstance(source, games.models.Source):\n # If I used source=native_source() or if native_source was a global variable then\n # during db initialization (running command initialize) you would get an error since\n # it gets its value when the database is empty.\n source = source()\n logger.info(\"getting leagues and countries from source %s...\", source)\n if not source:\n return [], []\n data, meta, status_code = sportmonks.countries.all(include='leagues.seasons')\n if not data:\n # if the status code is not 200 data and meta are None\n return [], []\n # with open('sportmonks/response_texts/aws_01.txt', 'w') as outfile:\n # json.dump(meta, outfile, indent=4)\n # json.dump(data, outfile, indent=4)\n\n pre_countries, pre_competitions = [], []\n\n try:\n # Notice that only the first supported sport will be processed (currently this is is acceptable since we only\n # support football and so the first supported sport will always be football)\n sport_sids = parse_sport(meta)\n sports = []\n for sport_sid in sport_sids:\n sport = games.models.Sport.by_sid(sid=sport_sid, source=source)\n if not sport:\n logger.info(\"Sport contained in the response with sid {} is not supported\".format(sport_sid))\n continue\n sports.append(sport)\n if not sports:\n logger.error(\"No supported sport in the response\")\n return [], []\n football_gname = games.naming.sport_names.get('football', None)\n football = games.models.Sport.objects.get(name=football_gname)\n if football not in sports:\n logger.info(\"Football is not in response\")\n return [], []\n # logger.debug(\"Trying to get sport from source: %s and sid: %s\", source, sport_sid)\n sport_gname = football_gname\n for item in data:\n try:\n country_sid = item.get('id')\n # 
logger.debug('country_sid: %s', country_sid)\n country_sname = item.get('name')\n # logger.debug('country_sname: %s', country_sname)\n extra = item.get('extra')\n # logger.debug('extra: %s', extra)\n leagues = item.get('leagues').get('data')\n # logger.debug('leagues: %s', leagues)\n try:\n fifa_code = extra.get('fifa') # some countries might lack extra information\n except AttributeError:\n fifa_code = None\n except Exception as e:\n logger.data_error('%s', e)\n continue\n pre_country = pre_models.PreCountry(source=source, sname=country_sname, sid=country_sid, fifa_code=fifa_code)\n pre_countries.append(pre_country)\n for league in leagues:\n try:\n # sportmonks uses sgname for leagues. I use this sgname as an sname (comp_season_specific name)\n competition_sname = league.get('name')\n # logger.debug('competition_sname: %s', competition_sname)\n sid = league.get('id')\n # logger.debug('sid: %s', sid)\n seasons = league.get('seasons').get('data')\n # logger.debug('seasons: %s', seasons)\n except Exception as e:\n logger.data_error('%s', e)\n continue\n competition_season_utils = []\n # comp_seas_sids = []\n for season in seasons:\n try:\n season_name = season.get('name')\n # logger.debug('season_name: %s', season_name)\n # season_name = seasons_special_treatment(season_name)\n competition_season_sid = season.get('id')\n # logger.debug('competition_season_sid: %s', competition_season_sid)\n is_current_season = season.get('is_current_season', False)\n # logger.debug('is_current_season: %s', is_current_season)\n except Exception as e:\n logger.data_error('%s', e)\n continue\n # comp_seas_sids.append(competition_season_sid)\n zak_season_name = games.models.Season.zakandify_season_string(season_name)\n season = zakanda.utils.season_from_season_name(zak_season_name)\n competition_season_type = get_competition_season_type(season)\n competition_season_util = pre_models.CompetitionSeasonUtil(season, competition_season_sid, competition_sname, competition_season_type)\n competition_season_utils.append(competition_season_util)\n # logger.debug(\"competition season sids: %s\", comp_seas_sids)\n pre_competition = pre_models.PreCompetition(\n source=source, sname=competition_sname, sid=sid, sport_name=sport_gname,\n competition_season_utils=competition_season_utils, pre_country=pre_country)\n pre_competitions.append(pre_competition)\n\n except Exception as e:\n logger.error('%s Unexpected problem with sportmonks.countries.all from source %s', e, source)\n logger.info(\"%s pre countries and %s pre competitions were created\", len(pre_countries), len(pre_competitions))\n return pre_countries, pre_competitions", "def get_people(team):", "def getAllTeams(self):\n return []", "def get_all_matches_by_league(self):\n raise NotImplementedError", "def cumulative_stats_for_teams_each_year(self):\n self.cumulative_stats_for_team_each_year = (\n self.combine_both_winning_losing_games_stats\n .sort_values(['WTeamID','Season'])\n .groupby(['WTeamID'])\n .cumsum()\n .pipe(lambda x:x.assign(Season = self.combine_both_winning_losing_games_stats.Season.values))\n .pipe(lambda x:x.assign(TeamID = self.combine_both_winning_losing_games_stats.WTeamID.values))\n .drop(['LTeamID','win_rate'],1)\n .pipe(lambda x:x.assign(win_rate = x.winning_num_counts/(x.winning_num_counts + x.losing_num_counts)))\n .pipe(lambda x:x.assign(WFGP = x.WFGM/x.WFGA))\n .pipe(lambda x:x.assign(WFG3P = x.WFGM3/x.WFGA3))\n .pipe(lambda x:x.assign(WFTP = x.WFTM/x.WFTA))\n .pipe(lambda x:x.assign(LFGP = x.LFGM/x.LFGA))\n .pipe(lambda x:x.assign(LFG3P = 
x.LFGM3/x.LFGA3))\n .pipe(lambda x:x.assign(LFTP = x.LFTM/x.LFTA))\n .pipe(lambda x:x.assign(fgp = x.total_fgm/x.total_fga))\n .pipe(lambda x:x.assign(fg3p = x.total_fg3m/x.total_fg3a))\n .pipe(lambda x:x.assign(ftp = x.total_ftm/x.total_fta))\n # rebounds cumsum stats\n .pipe(lambda x:x.assign(total_def_rebounds_percent = x.total_def_rebounds/x.total_rebounds))\n .pipe(lambda x:x.assign(total_off_rebounds_percent = x.total_off_rebounds/x.total_rebounds))\n .pipe(lambda x:x.assign(total_rebound_possession_percent = x.total_off_rebounds/x.total_team_missed_attempts))\n .pipe(lambda x:x.assign(total_rebound_possessiongain_percent = x.total_def_rebounds/x.total_opp_team_missed_attempts))\n # assists, turnovers, steals, blocks and personal fouls\n .pipe(lambda x:x.assign(total_block_opp_FGA_percent = x.total_blocks/x.total_opp_fga))\n .pipe(lambda x:x.assign(total_assist_per_fgm = x.total_assists/x.total_fgm))\n .pipe(lambda x:x.assign(total_assist_turnover_ratio = x.total_assists/x.total_turnover))\n # win or lose by how many points\n .pipe(lambda x:x.assign(lose_rate = 1-x.win_rate))\n .pipe(lambda x:x.assign(win_score_by = x.WScore - x.losing_opponent_score))\n .pipe(lambda x:x.assign(lose_score_by = x.LScore - x.winning_opponent_score))\n .pipe(lambda x:x.assign(expectation_per_game = x.win_rate * x.win_score_by/x.winning_num_counts + x.lose_rate * x.lose_score_by/x.losing_num_counts))\n .pipe(lambda x:x.assign(avg_win_score_by = x.win_score_by/x.winning_num_counts))\n .pipe(lambda x:x.assign(avg_lose_score_by = x.lose_score_by/x.losing_num_counts))\n )", "def get_teams():\n teams = []\n for teamId in range(1, 68):\n t = requests.get(TEAMS_URL.format(teamId)).json()\n team_list = t.get('teams')\n if team_list is None or len(team_list) == 0:\n continue\n teams.append(Team.from_json(team_list[0]))\n return teams", "def teams(self, game_id: int) -> DataFrame[Any]:", "def fa_finder(league_no, team_name):\n ros_proj_b_list = BatterProjection.objects.all()\n ros_proj_p_list = PitcherProjection.objects.all()\n player_comp = {}\n pitching_fa_list = yahoo_players(league_no, \"P\")\n batting_fa_list = yahoo_players(LEAGUE_NO, \"B\")\n avail_pitching_fas = rate_avail_players(pitching_fa_list, ros_proj_p_list)\n yahoo_team = get_single_yahoo_team(league_no, team_name)\n team_pitching_values = rate_team(yahoo_team, ros_proj_p_list)\n avail_batting_fas = rate_avail_players(batting_fa_list, ros_proj_b_list)\n team_batting_values = rate_team(yahoo_team, ros_proj_b_list)\n\n player_comp['Team Name'] = yahoo_team['team_name']\n player_comp['Pitching FAs'] = avail_pitching_fas\n player_comp['Pitching Team'] = team_pitching_values\n player_comp['Batting FAs'] = avail_batting_fas\n player_comp['Batting Team'] = team_batting_values\n\n return player_comp" ]
[ "0.70646495", "0.67921704", "0.6662633", "0.6608853", "0.6473593", "0.6421649", "0.6351773", "0.6312248", "0.6311487", "0.63097686", "0.6242666", "0.6128153", "0.6068329", "0.6029508", "0.6004093", "0.59811854", "0.5938113", "0.593734", "0.5913684", "0.5901109", "0.58794063", "0.58521336", "0.58507377", "0.5808142", "0.5789037", "0.57742673", "0.5747852", "0.5739689", "0.5730904", "0.56723785" ]
0.6963645
1
Get win rate for regular season for each coach
def get_win_rate_regular_season_for_each_coach(self):
    self.games_won_for_coaches = (
        self.raw_data_regularseason
        [['Season','DayNum','WTeamID']]
        # merge for winning team
        .merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],
               how='left',left_on=['Season','WTeamID'],right_on=['Season','TeamID'])
        .rename(columns={"FirstDayNum":"FirstDayNum_win","LastDayNum":"LastDayNum_win","CoachName":"CoachName_win","TeamID":"TeamID_win"})
        .pipe(lambda x:x.assign(which_coach_for_win = np.where((x.FirstDayNum_win <= x.DayNum) & (x.LastDayNum_win >= x.DayNum),1,0)))
        .query("which_coach_for_win != 0")
        .groupby(['Season','CoachName_win','WTeamID'])
        .agg({"which_coach_for_win":"sum"})
        .reset_index()
    )

    self.games_lose_for_coaches = (
        self.raw_data_regularseason
        [['Season','DayNum','LTeamID']]
        # merge for losing team
        .merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],
               how='left',left_on=['Season','LTeamID'],right_on=['Season','TeamID'])
        .rename(columns={"FirstDayNum":"FirstDayNum_lose","LastDayNum":"LastDayNum_lose","CoachName":"CoachName_lose","TeamID":"TeamID_lose"})
        .pipe(lambda x:x.assign(which_coach_for_lose = np.where((x.FirstDayNum_lose <= x.DayNum) & (x.LastDayNum_lose >= x.DayNum),1,0)))
        .query("which_coach_for_lose != 0")
        .groupby(['Season','CoachName_lose','LTeamID'])
        .agg({"which_coach_for_lose":"sum"})
        .reset_index()
    )

    # combine games won and lost df
    self.combine_regular_games_won_lose = (
        self.games_lose_for_coaches
        .merge(self.games_won_for_coaches,how='left',left_on=['Season','LTeamID','CoachName_lose'],right_on=['Season','WTeamID','CoachName_win'])
        .pipe(lambda x:x.assign(win_rate_regular = x.which_coach_for_win/(x.which_coach_for_win + x.which_coach_for_lose)))
        .drop(['CoachName_win','WTeamID'],1)
        .rename(columns={"CoachName_lose":"CoachName","LTeamID":"TeamID","which_coach_for_lose":"games_lost","which_coach_for_win":"games_won"})
    )
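Side note rather than part of the record: because combine_regular_games_won_lose starts from the losses frame with a left merge, a coach season with zero losses is dropped and a winless coach season ends up with a NaN win count. A hedged standalone sketch of the same win-rate idea using an outer merge and zero-filling follows; the function name is an assumption, and the column names simply mirror the frames built above.

# Hedged sketch, not the class method above: symmetric win-rate combination.
# games_won:  [Season, CoachName_win, WTeamID, which_coach_for_win]
# games_lost: [Season, CoachName_lose, LTeamID, which_coach_for_lose]
def regular_season_win_rate(games_won, games_lost):
    won = games_won.rename(columns={'CoachName_win': 'CoachName',
                                    'WTeamID': 'TeamID',
                                    'which_coach_for_win': 'games_won'})
    lost = games_lost.rename(columns={'CoachName_lose': 'CoachName',
                                      'LTeamID': 'TeamID',
                                      'which_coach_for_lose': 'games_lost'})
    # outer merge keeps undefeated and winless coach seasons alike
    out = won.merge(lost, how='outer', on=['Season', 'CoachName', 'TeamID']).fillna(0)
    # win rate is games won over total games coached that regular season
    out['win_rate_regular'] = out['games_won'] / (out['games_won'] + out['games_lost'])
    return out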
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_win_rate_post_season_for_each_coach(self):\n # get winning games for coaches\n self.post_games_won_for_coaches = (\n self.raw_data_postseason\n [['Season','DayNum','WTeamID']]\n # merge for winning team\n .merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],\n how='left',left_on=['Season','WTeamID'],right_on=['Season','TeamID'])\n .rename(columns={\"FirstDayNum\":\"FirstDayNum_win\",\"LastDayNum\":\"LastDayNum_win\",\"CoachName\":\"CoachName_win\",\"TeamID\":\"TeamID_win\"})\n .pipe(lambda x:x.assign(which_coach_for_win = np.where((x.FirstDayNum_win <= x.DayNum) & (x.LastDayNum_win >= x.DayNum),1,0)))\n .query(\"which_coach_for_win != 0\")\n .groupby(['Season','CoachName_win','WTeamID'])\n .agg({\"which_coach_for_win\":\"sum\"})\n .reset_index()\n )\n\n # get losing games for coaches\n self.post_games_lose_for_coaches = (\n self.raw_data_postseason\n [['Season','DayNum','LTeamID']]\n # merge for losing team\n .merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],\n how='left',left_on=['Season','LTeamID'],right_on=['Season','TeamID'])\n .rename(columns={\"FirstDayNum\":\"FirstDayNum_lose\",\"LastDayNum\":\"LastDayNum_lose\",\"CoachName\":\"CoachName_lose\",\"TeamID\":\"TeamID_lose\"})\n .pipe(lambda x:x.assign(which_coach_for_lose = np.where((x.FirstDayNum_lose <= x.DayNum) & (x.LastDayNum_lose >= x.DayNum),1,0)))\n .query(\"which_coach_for_lose != 0\")\n .groupby(['Season','CoachName_lose','LTeamID'])\n .agg({\"which_coach_for_lose\":\"sum\"})\n .reset_index()\n )\n\n # combine games won and lost df for post season\n self.combine_post_games_won_lose = (\n self.post_games_lose_for_coaches\n .merge(self.post_games_won_for_coaches,how='left',left_on=['Season','LTeamID','CoachName_lose'],right_on=['Season','WTeamID','CoachName_win'])\n .pipe(lambda x:x.assign(win_rate_post = x.which_coach_for_win/(x.which_coach_for_win + x.which_coach_for_lose)))\n .drop(['CoachName_win','WTeamID'],1)\n .rename(columns={\"CoachName_lose\":\"CoachName\",\"LTeamID\":\"TeamID\",\"which_coach_for_lose\":\"post_games_lost\",\"which_coach_for_win\":\"post_games_won\"})\n .fillna(0)\n )", "def get_champion_winrate(self, summoner_id, champion_id):\n request = rq.get(\n 'https://{region}.api.pvp.net/api/lol/{region}/v{api_v}/stats/by-summoner/{summ_id}/ranked?season=SEASON{year}&api_key={api_key}'\n .format(\n region=self.region,\n api_v=api_version['stats'],\n summ_id=summoner_id,\n year=dt.today().year,\n api_key=API_KEY\n )\n )\n try:\n check_response(request)\n champions = request.json()['champions']\n if champions is not None:\n for champion in champions:\n if champion['id'] == champion_id:\n total_won = champion['stats']['totalSessionsWon']\n total = total_won + champion['stats']['totalSessionsLost']\n\n winrate = total_won / total\n return [winrate, total]\n return 0, 0\n except ServiceException:\n return 0, 0", "def get_championship_won_for_each_coach(self):\n self.championship_team = (\n self.raw_data_postseason\n .merge(self.season_max_days,how='left',on=['Season'])\n .query(\"DayNum == season_max_days\")\n .groupby(['Season','WTeamID'])\n .agg({\"NumOT\":\"count\"})\n .reset_index()\n .rename(columns={\"NumOT\":\"is_champion\",\"WTeamID\":\"TeamID\"})\n )", "def calculateWinRate():\n times = 10\n winRate = 0.0\n for i in range(times):\n game = Game('user', 6, 6)\n winRate += game.play(5, False, True, False, False)\n winRate = winRate/times\n print \"Winrate:\", winRate", "def _compute_winrates(synergy, 
counter, heroes_released):\n for i in range(heroes_released):\n for j in range(heroes_released):\n if i != j and i != 23 and j != 23:\n if synergy['games'][i, j] != 0:\n synergy['winrate'][i, j] = synergy['wins'][i, j] / \\\n float(synergy['games'][i, j])\n\n if counter['games'][i, j] != 0:\n counter['winrate'][i, j] = counter['wins'][i, j] / \\\n float(counter['games'][i, j])", "def winning_games_stats(self):\n self.winning_games_up_to_2013 = (\n self.df\n .pipe(lambda x:x.assign(winning_num_counts = 1))\n .query(\"Season <= 2013\")\n .groupby(['Season','WTeamID'])\n .agg({\"WScore\":\"sum\",\"WFGM\":\"sum\",\"WFGA\":\"sum\",\"WFGM3\":\"sum\",\"WFGA3\":\"sum\",\"WFTM\":\"sum\",\"WFTA\":\"sum\",\"LScore\":\"sum\",\"winning_num_counts\":\"sum\",\n \"WOR\":\"sum\",\"WDR\":\"sum\",\"LFGM\":\"sum\",\"LFGA\":\"sum\",\n \"WAst\":\"sum\",\"WTO\":\"sum\",\"WStl\":\"sum\",\"WBlk\":\"sum\",\"WPF\":\"sum\"})\n .reset_index()\n .rename(columns={\"LScore\":\"losing_opponent_score\"})\n # rebounds\n .pipe(lambda x:x.assign(total_winning_rebounds = x.WOR + x.WDR))\n .pipe(lambda x:x.assign(winning_off_rebounds_percent = x.WOR/x.total_winning_rebounds))\n .pipe(lambda x:x.assign(winning_def_rebounds_percent = x.WDR/x.total_winning_rebounds))\n .pipe(lambda x:x.assign(team_missed_attempts = x.WFGA - x.WFGM))\n .pipe(lambda x:x.assign(opp_team_missed_attempts = x.LFGA - x.LFGM))\n .pipe(lambda x:x.assign(winning_rebound_possession_percent = x.WOR/x.team_missed_attempts))\n .pipe(lambda x:x.assign(winning_rebound_possessiongain_percent = x.WDR/x.opp_team_missed_attempts))\n # blocks, steals, assists and turnovers\n .pipe(lambda x:x.assign(winning_block_opp_FGA_percent = x.WBlk/x.LFGA))\n .pipe(lambda x:x.assign(winning_assist_per_fgm = x.WAst/x.WFGM))\n .pipe(lambda x:x.assign(winning_assist_turnover_ratio = x.WAst/x.WTO))\n # rename columns to prevent duplication when joining with losing stats. 
example: WFGM_x\n .rename(columns={\"LFGA\":\"LFGA_opp\",\"LFGM\":\"LFGM_opp\"})\n )", "def winrate(matches):\n if not matches:\n print('no matches')\n return None\n\n win_loss = [match['result'] for match in matches]\n return sum(win_loss)/len(win_loss)", "def marcels_players(goalie, date, df):\n # 0 = that year, 1 is year b4 ....\n marcel_weights = [.36, .29, .21, .14]\n reg_const = 2000\n reg_avg = 0 # Where to regress to\n\n # Use past 3 season to weight games played -> Just take weighted average\n gp_weights = [8, 4, 2, 0]\n\n season = int(helpers.get_season(date))\n\n weighted_goals_sum, weighted_fen_sum, weighted_xg_sum, weights_marcel_sum = 0, 0, 0, 0\n weighted_gp_sum, weights_gp_sum = 0, 0\n\n # Past 4 Seasons\n for i in range(0, 4):\n if season - i > 2006:\n # Subset from stats df\n df_goalie = df[(df['player'] == goalie) & (df['season'] == (season - i))]\n\n # Sanity Check\n if df_goalie.shape[0] > 1:\n print(\"Too many rows!!!!!!!\")\n exit()\n\n # If he played that year\n if not df_goalie.empty:\n weighted_goals_sum += df_goalie.iloc[0]['goals_a'] * marcel_weights[i]\n weighted_fen_sum += df_goalie.iloc[0]['fenwick_a'] * marcel_weights[i]\n weighted_xg_sum += df_goalie.iloc[0]['xg_a'] * marcel_weights[i]\n weighted_gp_sum += df_goalie.iloc[0]['games'] * gp_weights[i]\n\n # -> To divide by at end...normalize everything\n weights_marcel_sum += marcel_weights[i]\n weights_gp_sum += gp_weights[i]\n\n # Normalize weighted sums\n weighted_xg_sum = weighted_xg_sum / weights_marcel_sum if weights_marcel_sum != 0 else 0\n weighted_goals_sum = weighted_goals_sum / weights_marcel_sum if weights_marcel_sum != 0 else 0\n weighted_fen_sum = weighted_fen_sum / weights_marcel_sum if weights_marcel_sum != 0 else 0\n\n # Get Regressed fsv%\n if weighted_fen_sum != 0:\n weighted_adj_fsv = ((1 - weighted_goals_sum / weighted_fen_sum) - (1 - weighted_xg_sum / weighted_fen_sum)) * 100\n else:\n weighted_adj_fsv = 0\n reg_adj_fsv = weighted_adj_fsv - ((weighted_adj_fsv - reg_avg) * (reg_const / (reg_const + weighted_fen_sum)))\n\n # Get weighted gp\n weighted_gp_sum = weighted_gp_sum / weights_gp_sum if weights_gp_sum != 0 else 0\n\n return {'fsv': reg_adj_fsv, 'gp': weighted_gp_sum}", "def get_final_coach_for_each_season(self):\n self.final_coach_for_season = (\n self.num_days_coach_for_season\n .groupby(['Season','TeamID'])\n .agg({\"CoachName\":\"count\"})\n .reset_index()\n .rename(columns={\"CoachName\":\"coach_counts\"})\n .merge(self.num_days_coach_for_season,how='left',on=['Season','TeamID'])\n .pipe(lambda x:x.assign(final_coach = np.where(x.num_season >= 0.5, x.CoachName, \"ignore\")))\n [['Season','TeamID','final_coach']]\n )", "def cumulative_stats_for_teams_each_year(self):\n self.cumulative_stats_for_team_each_year = (\n self.combine_both_winning_losing_games_stats\n .sort_values(['WTeamID','Season'])\n .groupby(['WTeamID'])\n .cumsum()\n .pipe(lambda x:x.assign(Season = self.combine_both_winning_losing_games_stats.Season.values))\n .pipe(lambda x:x.assign(TeamID = self.combine_both_winning_losing_games_stats.WTeamID.values))\n .drop(['LTeamID','win_rate'],1)\n .pipe(lambda x:x.assign(win_rate = x.winning_num_counts/(x.winning_num_counts + x.losing_num_counts)))\n .pipe(lambda x:x.assign(WFGP = x.WFGM/x.WFGA))\n .pipe(lambda x:x.assign(WFG3P = x.WFGM3/x.WFGA3))\n .pipe(lambda x:x.assign(WFTP = x.WFTM/x.WFTA))\n .pipe(lambda x:x.assign(LFGP = x.LFGM/x.LFGA))\n .pipe(lambda x:x.assign(LFG3P = x.LFGM3/x.LFGA3))\n .pipe(lambda x:x.assign(LFTP = x.LFTM/x.LFTA))\n .pipe(lambda 
x:x.assign(fgp = x.total_fgm/x.total_fga))\n .pipe(lambda x:x.assign(fg3p = x.total_fg3m/x.total_fg3a))\n .pipe(lambda x:x.assign(ftp = x.total_ftm/x.total_fta))\n # rebounds cumsum stats\n .pipe(lambda x:x.assign(total_def_rebounds_percent = x.total_def_rebounds/x.total_rebounds))\n .pipe(lambda x:x.assign(total_off_rebounds_percent = x.total_off_rebounds/x.total_rebounds))\n .pipe(lambda x:x.assign(total_rebound_possession_percent = x.total_off_rebounds/x.total_team_missed_attempts))\n .pipe(lambda x:x.assign(total_rebound_possessiongain_percent = x.total_def_rebounds/x.total_opp_team_missed_attempts))\n # assists, turnovers, steals, blocks and personal fouls\n .pipe(lambda x:x.assign(total_block_opp_FGA_percent = x.total_blocks/x.total_opp_fga))\n .pipe(lambda x:x.assign(total_assist_per_fgm = x.total_assists/x.total_fgm))\n .pipe(lambda x:x.assign(total_assist_turnover_ratio = x.total_assists/x.total_turnover))\n # win or lose by how many points\n .pipe(lambda x:x.assign(lose_rate = 1-x.win_rate))\n .pipe(lambda x:x.assign(win_score_by = x.WScore - x.losing_opponent_score))\n .pipe(lambda x:x.assign(lose_score_by = x.LScore - x.winning_opponent_score))\n .pipe(lambda x:x.assign(expectation_per_game = x.win_rate * x.win_score_by/x.winning_num_counts + x.lose_rate * x.lose_score_by/x.losing_num_counts))\n .pipe(lambda x:x.assign(avg_win_score_by = x.win_score_by/x.winning_num_counts))\n .pipe(lambda x:x.assign(avg_lose_score_by = x.lose_score_by/x.losing_num_counts))\n )", "def get_player_stats_from_game(team, year, week):", "def calc_winner(self):\n pass", "def getSeasonStats(self):\n df_season_agg = self.toSeasonAggFormat()\n\n # Calculate Possessions for each game\n df_season_agg['possessions'] = 0.5 * (df_season_agg['FGA'] + 0.475 * df_season_agg['FTA'] - df_season_agg['OR'] + df_season_agg['TO']) \\\n + 0.5 * (df_season_agg['OppFGA'] + 0.475 * df_season_agg['OppFTA'] - df_season_agg['OppOR'] + df_season_agg['OppTO'])\n\n # Aggregate to Season Summary Level\n season_stats = df_season_agg.groupby(['TeamID', 'Season']).sum()\n\n season_stats = season_stats.rename(columns={'Win':'wins'})\n\n # Season Advanced Stats\n season_stats['o_eff'] = season_stats['Score'] / season_stats['possessions'] * 100\n season_stats['d_eff'] = season_stats['OppScore'] / season_stats['possessions'] * 100\n season_stats['net_eff'] = season_stats['o_eff'] - season_stats['d_eff']\n\n season_stats.drop('DayNum', axis=1, inplace=True)\n season_stats.drop('OppTeamID', axis=1, inplace=True)\n season_stats.drop('rand', axis=1, inplace=True)\n\n return season_stats", "def rate_club(user, club):\n if not user.is_authenticated():\n return None\n if not club.posel_set.exists():\n return None\n return sum(x[1] for x in rank_in_club(user, club)) / club.posel_set.count()", "def getWinProbability(team1, team2, r, year=2019):\n\tR1_PROBS = R1_PROBS_2019 if year == 2019 else R1_PROBS_2020\n\tALPHA_VALS = ALPHA_VALS_2019 if year == 2019 else ALPHA_VALS_2020\n\n\t# Currently using Power Model\n\ts1 = team1['seed']\n\ts2 = team2['seed']\n\n\t# Use R1_PROBS for round 1\n\tif r == 1:\n\t\tif not (s1 + s2 == 17):\n\t\t\texit('Invalid round 1 matchup: seeds {0} vs. 
{1}.'.format(s1, s2))\n\t\treturn R1_PROBS[s1] if s1 < s2 else R1_PROBS[s2]\n\t\n\t# Use ALPHA_VALS for other rounds (unless seeds are same)\n\tif s1 == s2:\n\t\treturn 0.5\n\n\talpha = ALPHA_VALS[r]\n\ts1a = (s1 * 1.0) ** alpha\n\ts2a = (s2 * 1.0) ** alpha\n\treturn s2a / (s1a + s2a)", "def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent", "def get_team_round_score(self, team: int, match_round: int) -> int:\n assert self.rounds, \"There are no rounds or the requested round doesn't exist\"\n\n team_score = 0\n\n if team == 1:\n for player in self.rounds[match_round - 1].team1.players:\n team_score += player.scores\n else:\n for player in self.rounds[match_round - 1].team2.players:\n team_score += player.scores\n\n return team_score", "def get_new_ratings(players, teams):\n nb_players_team0 = len(teams[0])\n nb_players_team1 = len(teams[1])\n winner = players[teams[0][0]]\n loser = players[teams[1][0]]\n if nb_players_team0 == 1 and nb_players_team1 == 1:\n new_r1, new_r3 = rate_1vs1(winner,loser)\n elif nb_players_team0 == 1 and nb_players_team1 > 1:\n team_loser = [loser, players[teams[1][1]]]\n (new_r1), (new_r3, new_r4) = rate([winner, team_loser], ranks=[0, 1]) \n elif nb_players_team0 > 1 and nb_players_team1 == 1:\n team_winner = [winner, players[teams[0][1]]]\n (new_r1, new_r2), (new_r3) = rate([team_winner, loser], ranks=[0, 1]) \n else:\n team_loser = [loser, players[teams[1][1]]]\n team_winner = [winner, players[teams[0][1]]]\n (new_r1, new_r2), (new_r3, new_r4) = rate([team_winner, team_loser], ranks=[0, 1]) \n player1 = {'name': teams[0][0], 'mu': new_r1.mu, 'sigma': new_r1.sigma}\n player3 = {'name': teams[1][0], 'mu': new_r3.mu, 'sigma': new_r3.sigma}\n if nb_players_team0 > 1:\n player2 = {'name': teams[0][1], 'mu': new_r2.mu, 'sigma': new_r2.sigma}\n if nb_players_team1 > 1:\n player4 = {'name': teams[1][1], 'mu': new_r4.mu, 'sigma': new_r4.sigma}\n if nb_players_team0 > 1:\n return [player1, player2, player3, player4]\n return [player1, player2, player4]\n return [player1, player3]", "def win_ratio_avg(self):\n win_ratio = 0\n # Adds all the win ratios of team in this conference which will be\n # used to compute the win ratio average.\n for team_obj in self._conf_teams:\n ### INVARIANT: team_obj is a Team class object and\n ### self._conf_teams is a list of Team class objects.\n win_ratio += team_obj._win_ratio\n return win_ratio/len(self._conf_teams)", "def winRate(DF):\r\n df = DF[\"return\"]\r\n pos = df[df>1]\r\n neg = df[df<1]\r\n return (len(pos) / len(pos + neg)) * 100", "def evaluate(game, player):\n weights = [2, 200, 2000, 20000]\n reward = 0\n opponent = get_opponent(player)\n for length in range(2, 6):\n reward += weights[length - 2] * get_num_series(game, player, length)\n reward -= weights[length - 2] * get_num_series(game, opponent, length)\n return reward", "def calc_match_points(self, match):\n if match.winner == match.TIE:\n match.home.tournament_score += 1\n match.away.tournament_score += 1\n else:\n match.winner.tournament_score += 3\n match.loser.tournament_score += 0", "def glicko2_summarize(df, 
min_weeks=4, use_prior=False):\n\n # Loop through seasons and weeks to create full history of ratings by team\n results = pd.DataFrame()\n for season in df['Season'].sort_values().unique():\n for week in df[df['Season']==season]['Week'].sort_values().unique():\n if week > min_weeks:\n if week == min_weeks + 1:\n season_df = df[df['Season']==season].copy()\n uniqueteamids = pd.concat([season_df['VisID'],\n season_df['HomeID']]).unique()\n if use_prior == True and season > df['Season'].min():\n ratings = np.repeat(1500, len(uniqueteamids))\n ratingsdeviance = np.repeat(350, len(uniqueteamids))\n sigma = np.repeat(0.06, len(uniqueteamids))\n glicko_stats = pd.DataFrame({'ratings': ratings,\n 'ratingsdeviance': ratingsdeviance,\n 'sigma': sigma}, index=uniqueteamids)\n prior = results[results['Season']==season-1]\n prior_id_mask = [True if id in uniqueteamids else False for id in prior['TeamID']]\n prior = prior[prior_id_mask]\n prior = prior.sort_values('Week').groupby('TeamID').tail(1)\n prior = prior.drop('Week',1)\n prior = prior.set_index('TeamID')\n glicko_stats.loc[prior.index, 'ratings'] = prior['Glicko_Rating'] - (prior['Glicko_Rating'] - 1500)/2\n glicko_stats.loc[prior.index, 'ratingsdeviance'] = prior['Glicko_Rating_Deviance'] - (prior['Glicko_Rating_Deviance'] - 350)/2\n glicko_stats.loc[prior.index, 'sigma'] = prior['Glicko_Sigma'] - (prior['Glicko_Sigma'] - 0.06)/2\n else:\n ratings = np.repeat(1500, len(uniqueteamids))\n ratingsdeviance = np.repeat(350, len(uniqueteamids))\n sigma = np.repeat(0.06, len(uniqueteamids))\n glicko_stats = pd.DataFrame({'ratings': ratings,\n 'ratingsdeviance': ratingsdeviance,\n 'sigma': sigma}, index=uniqueteamids)\n\n week_df = df[(df['Season']==season) & (df['Week']<week)].copy()\n glicko_stats = glicko2(week_df, uniqueteamids, glicko_stats)\n\n\n glicko_results = glicko_stats.reset_index()\n print(glicko_results.head(), season)\n glicko_results.columns = ['TeamID','Glicko_Rating',\n 'Glicko_Rating_Deviance',\n 'Glicko_Sigma']\n glicko_results['Season'] = season\n glicko_results['Week'] = week\n results = pd.concat([results, glicko_results], axis=0,\n ignore_index=True)\n\n # Join the ratings to the original schedule of games\n df = df.merge(results, left_on=['Season','Week','HomeID'],\n right_on=['Season','Week','TeamID'],\n suffixes=('','_Home'))\n df.drop('TeamID', 1, inplace=True)\n\n df = df.merge(results, left_on=['Season','Week','VisID'],\n right_on=['Season','Week','TeamID'],\n suffixes=('','_Away'))\n df.drop('TeamID', 1, inplace=True)\n\n # Create key and set index to join with n_game summaries dataset.\n df.set_index(['HomeID', 'VisID', 'Season', 'Week'], inplace=True)\n df = df[['Glicko_Rating', 'Glicko_Rating_Deviance', 'Glicko_Sigma',\n 'Glicko_Rating_Away', 'Glicko_Rating_Deviance_Away',\n 'Glicko_Sigma_Away']]\n df.columns = ['Glicko_Rating_Home', 'Glicko_Rating_Deviance_Home',\n 'Glicko_Sigma_Home', 'Glicko_Rating_Away',\n 'Glicko_Rating_Deviance_Away', 'Glicko_Sigma_Away']\n\n return df", "def calc_win_lose_ratio(self):\n total = len(self.train_y)\n survived = 0\n for i in self.train_y:\n if i > 0:\n survived += 1\n\n self.survival_sum = [survived, total-survived]", "def average_win_rate(strategy, baseline=always_roll(4)):\n win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)\n win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)\n\n return (win_rate_as_player_0 + win_rate_as_player_1) / 2", "def average_win_rate(strategy, baseline=always_roll(4)):\n win_rate_as_player_0 = 1 - 
make_averaged(winner)(strategy, baseline)\n win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)\n\n return (win_rate_as_player_0 + win_rate_as_player_1) / 2", "def average_win_rate(strategy, baseline=always_roll(4)):\n win_rate_as_player_0 = 1 - make_averaged(winner)(strategy, baseline)\n win_rate_as_player_1 = make_averaged(winner)(baseline, strategy)\n\n return (win_rate_as_player_0 + win_rate_as_player_1) / 2", "def get_tourney_rounds(self, conference, year):\n ts_dict = self.get_tourney_slots()\n seed_dict = self.get_tourney_seeds()\n tr_dict = self.get_tourney_results()\n \n round_1 = list()\n round_2 = list()\n round_3 = list()\n round_4 = list()\n winner = list()\n \n round1_winners = list()\n for seed, team in seed_dict[year].items():\n for winning, losing in tr_dict[year]:\n if team == winning and conference in seed:\n round1_winners.append(seed[1:])\n #removes duplicates because I did this part weirdly... HEHEH\n round1_winners = list(set(round1_winners))\n\n win_counter = defaultdict(int)\n for seed, team in seed_dict[year].items(): \n for winning, losing in tr_dict[year]:\n if team == winning and conference in seed:\n win_counter[winning] += 1\n \n for slot, matchup in ts_dict[year].items():\n \n if conference in slot and \"R1\" in slot: \n round_1.append(\"{}-{}\".format(matchup[1:3], matchup[-2:]))\n round_1 = sorted(round_1)\n #for match in round_1:\n for winner1 in round1_winners:\n if winner1 in round_1[0]:\n for winner2 in round1_winners:\n if winner2 in round_1[-1]:\n round_2.append(\"{}-{}\".format(winner1, winner2))\n if winner1 in round_1[1]:\n for winner2 in round1_winners:\n if winner2 in round_1[-2]:\n round_2.append(\"{}-{}\".format(winner1, winner2))\n if winner1 in round_1[2]:\n for winner2 in round1_winners:\n if winner2 in round_1[-3]:\n round_2.append(\"{}-{}\".format(winner1, winner2))\n if winner1 in round_1[3]:\n for winner2 in round1_winners:\n if winner2 in round_1[-4]:\n round_2.append(\"{}-{}\".format(winner1, winner2))\n round_2 = sorted(round_2)\n\n round2_winners = list()\n for seed, team in seed_dict[year].items():\n for team2, count in win_counter.items():\n if team == team2 and count > 1:\n round2_winners.append(seed[1:])\n \n for winner1 in round2_winners:\n if winner1 in round_2[0]:\n for winner2 in round2_winners:\n if winner2 in round_2[-1]:\n round_3.append(\"{}-{}\".format(winner1, winner2))\n if winner1 in round_2[1]:\n for winner2 in round2_winners:\n if winner2 in round_2[-2]:\n round_3.append(\"{}-{}\".format(winner1, winner2))\n round_3 = sorted(round_3)\n\n round3_winners = list()\n for seed, team in seed_dict[year].items():\n for team2, count in win_counter.items():\n if team == team2 and count > 2:\n round3_winners.append(seed[1:])\n\n for winner1 in round3_winners:\n if winner1 in round_3[0]:\n for winner2 in round3_winners:\n if winner2 in round_3[-1]:\n round_4.append(\"{}-{}\".format(winner1, winner2))\n round_4 = sorted(round_4)\n\n for seed, team in seed_dict[year].items():\n for team2, count in win_counter.items():\n if team == team2 and count > 3:\n winner.append(seed[1:])\n\n conferences = {\"W\": \"East\", \"X\": \"Midwest\", \"Y\": \"South\", \"Z\": \"West\"}\n\n #print(\"CONFERENCE: {}, YEAR: {}\".format(conferences[conference], year))\n #print(\"ROUND1:\", round_1)\n #print(\"ROUND2:\", round_2)\n #print(\"ROUND3:\", round_3)\n #print(\"ROUND4:\", round_4)\n #print(\"WINNER:\", winner)\n\n #clearing out the tourney results dictionary\n #tr_dict.clear()\n\n return round_1, round_2, round_3, round_4, winner", 
"def get_win_percentage(self) -> float:\n if self.wins == 0:\n return 0.0\n else:\n return round((self.wins / (self.wins + self.losses)) * 100, 2)", "def winning_percentage(self):\n return float(len(self.wins))/len((self.wins+self.losses))" ]
[ "0.7728886", "0.6802758", "0.66279066", "0.6522059", "0.64248955", "0.6424082", "0.6392534", "0.62645185", "0.6220719", "0.6031486", "0.6006317", "0.5837831", "0.5770262", "0.57343024", "0.57129455", "0.5707701", "0.56960624", "0.5694389", "0.56909186", "0.56840575", "0.56737727", "0.5627025", "0.56268334", "0.5622924", "0.5618245", "0.5618245", "0.5618245", "0.56104475", "0.56102824", "0.5572643" ]
0.8015784
0
Get win rate for post season for each coach
def get_win_rate_post_season_for_each_coach(self):
    # get winning games for coaches
    self.post_games_won_for_coaches = (
        self.raw_data_postseason
        [['Season','DayNum','WTeamID']]
        # merge for winning team
        .merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],
               how='left',left_on=['Season','WTeamID'],right_on=['Season','TeamID'])
        .rename(columns={"FirstDayNum":"FirstDayNum_win","LastDayNum":"LastDayNum_win","CoachName":"CoachName_win","TeamID":"TeamID_win"})
        .pipe(lambda x:x.assign(which_coach_for_win = np.where((x.FirstDayNum_win <= x.DayNum) & (x.LastDayNum_win >= x.DayNum),1,0)))
        .query("which_coach_for_win != 0")
        .groupby(['Season','CoachName_win','WTeamID'])
        .agg({"which_coach_for_win":"sum"})
        .reset_index()
    )

    # get losing games for coaches
    self.post_games_lose_for_coaches = (
        self.raw_data_postseason
        [['Season','DayNum','LTeamID']]
        # merge for losing team
        .merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],
               how='left',left_on=['Season','LTeamID'],right_on=['Season','TeamID'])
        .rename(columns={"FirstDayNum":"FirstDayNum_lose","LastDayNum":"LastDayNum_lose","CoachName":"CoachName_lose","TeamID":"TeamID_lose"})
        .pipe(lambda x:x.assign(which_coach_for_lose = np.where((x.FirstDayNum_lose <= x.DayNum) & (x.LastDayNum_lose >= x.DayNum),1,0)))
        .query("which_coach_for_lose != 0")
        .groupby(['Season','CoachName_lose','LTeamID'])
        .agg({"which_coach_for_lose":"sum"})
        .reset_index()
    )

    # combine games won and lost df for post season
    self.combine_post_games_won_lose = (
        self.post_games_lose_for_coaches
        .merge(self.post_games_won_for_coaches,how='left',left_on=['Season','LTeamID','CoachName_lose'],right_on=['Season','WTeamID','CoachName_win'])
        .pipe(lambda x:x.assign(win_rate_post = x.which_coach_for_win/(x.which_coach_for_win + x.which_coach_for_lose)))
        .drop(['CoachName_win','WTeamID'],1)
        .rename(columns={"CoachName_lose":"CoachName","LTeamID":"TeamID","which_coach_for_lose":"post_games_lost","which_coach_for_win":"post_games_won"})
        .fillna(0)
    )
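Aside, not part of the record: a minimal sketch of joining the regular-season and post-season coach frames produced by the two methods above into a single table; the helper name and the left-join-plus-zero-fill choice are assumptions made here for illustration.

# Hedged sketch (assumed helper): one row per coach season with both rates.
# regular_df: frame shaped like combine_regular_games_won_lose
# post_df:    frame shaped like combine_post_games_won_lose
def combine_coach_win_rates(regular_df, post_df):
    return (
        regular_df
        .merge(post_df, how='left', on=['Season', 'TeamID', 'CoachName'])
        # coach seasons with no tournament games get zeros rather than NaN
        .fillna({'post_games_won': 0, 'post_games_lost': 0, 'win_rate_post': 0.0})
    )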
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_win_rate_regular_season_for_each_coach(self):\n self.games_won_for_coaches = (\n self.raw_data_regularseason\n [['Season','DayNum','WTeamID']]\n # merge for winning team\n .merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],\n how='left',left_on=['Season','WTeamID'],right_on=['Season','TeamID'])\n .rename(columns={\"FirstDayNum\":\"FirstDayNum_win\",\"LastDayNum\":\"LastDayNum_win\",\"CoachName\":\"CoachName_win\",\"TeamID\":\"TeamID_win\"})\n .pipe(lambda x:x.assign(which_coach_for_win = np.where((x.FirstDayNum_win <= x.DayNum) & (x.LastDayNum_win >= x.DayNum),1,0)))\n .query(\"which_coach_for_win != 0\")\n .groupby(['Season','CoachName_win','WTeamID'])\n .agg({\"which_coach_for_win\":\"sum\"})\n .reset_index()\n )\n\n self.games_lose_for_coaches = (\n self.raw_data_regularseason\n [['Season','DayNum','LTeamID']]\n # merge for losing team\n .merge(self.num_days_coach_for_season[['Season','TeamID','FirstDayNum','LastDayNum','CoachName']],\n how='left',left_on=['Season','LTeamID'],right_on=['Season','TeamID'])\n .rename(columns={\"FirstDayNum\":\"FirstDayNum_lose\",\"LastDayNum\":\"LastDayNum_lose\",\"CoachName\":\"CoachName_lose\",\"TeamID\":\"TeamID_lose\"})\n .pipe(lambda x:x.assign(which_coach_for_lose = np.where((x.FirstDayNum_lose <= x.DayNum) & (x.LastDayNum_lose >= x.DayNum),1,0)))\n .query(\"which_coach_for_lose != 0\")\n .groupby(['Season','CoachName_lose','LTeamID'])\n .agg({\"which_coach_for_lose\":\"sum\"})\n .reset_index()\n )\n\n # combine games won and lost df\n self.combine_regular_games_won_lose = (\n self.games_lose_for_coaches\n .merge(self.games_won_for_coaches,how='left',left_on=['Season','LTeamID','CoachName_lose'],right_on=['Season','WTeamID','CoachName_win'])\n .pipe(lambda x:x.assign(win_rate_regular = x.which_coach_for_win/(x.which_coach_for_win + x.which_coach_for_lose)))\n .drop(['CoachName_win','WTeamID'],1)\n .rename(columns={\"CoachName_lose\":\"CoachName\",\"LTeamID\":\"TeamID\",\"which_coach_for_lose\":\"games_lost\",\"which_coach_for_win\":\"games_won\"})\n )", "def get_championship_won_for_each_coach(self):\n self.championship_team = (\n self.raw_data_postseason\n .merge(self.season_max_days,how='left',on=['Season'])\n .query(\"DayNum == season_max_days\")\n .groupby(['Season','WTeamID'])\n .agg({\"NumOT\":\"count\"})\n .reset_index()\n .rename(columns={\"NumOT\":\"is_champion\",\"WTeamID\":\"TeamID\"})\n )", "def get_champion_winrate(self, summoner_id, champion_id):\n request = rq.get(\n 'https://{region}.api.pvp.net/api/lol/{region}/v{api_v}/stats/by-summoner/{summ_id}/ranked?season=SEASON{year}&api_key={api_key}'\n .format(\n region=self.region,\n api_v=api_version['stats'],\n summ_id=summoner_id,\n year=dt.today().year,\n api_key=API_KEY\n )\n )\n try:\n check_response(request)\n champions = request.json()['champions']\n if champions is not None:\n for champion in champions:\n if champion['id'] == champion_id:\n total_won = champion['stats']['totalSessionsWon']\n total = total_won + champion['stats']['totalSessionsLost']\n\n winrate = total_won / total\n return [winrate, total]\n return 0, 0\n except ServiceException:\n return 0, 0", "def winning_games_stats(self):\n self.winning_games_up_to_2013 = (\n self.df\n .pipe(lambda x:x.assign(winning_num_counts = 1))\n .query(\"Season <= 2013\")\n .groupby(['Season','WTeamID'])\n 
.agg({\"WScore\":\"sum\",\"WFGM\":\"sum\",\"WFGA\":\"sum\",\"WFGM3\":\"sum\",\"WFGA3\":\"sum\",\"WFTM\":\"sum\",\"WFTA\":\"sum\",\"LScore\":\"sum\",\"winning_num_counts\":\"sum\",\n \"WOR\":\"sum\",\"WDR\":\"sum\",\"LFGM\":\"sum\",\"LFGA\":\"sum\",\n \"WAst\":\"sum\",\"WTO\":\"sum\",\"WStl\":\"sum\",\"WBlk\":\"sum\",\"WPF\":\"sum\"})\n .reset_index()\n .rename(columns={\"LScore\":\"losing_opponent_score\"})\n # rebounds\n .pipe(lambda x:x.assign(total_winning_rebounds = x.WOR + x.WDR))\n .pipe(lambda x:x.assign(winning_off_rebounds_percent = x.WOR/x.total_winning_rebounds))\n .pipe(lambda x:x.assign(winning_def_rebounds_percent = x.WDR/x.total_winning_rebounds))\n .pipe(lambda x:x.assign(team_missed_attempts = x.WFGA - x.WFGM))\n .pipe(lambda x:x.assign(opp_team_missed_attempts = x.LFGA - x.LFGM))\n .pipe(lambda x:x.assign(winning_rebound_possession_percent = x.WOR/x.team_missed_attempts))\n .pipe(lambda x:x.assign(winning_rebound_possessiongain_percent = x.WDR/x.opp_team_missed_attempts))\n # blocks, steals, assists and turnovers\n .pipe(lambda x:x.assign(winning_block_opp_FGA_percent = x.WBlk/x.LFGA))\n .pipe(lambda x:x.assign(winning_assist_per_fgm = x.WAst/x.WFGM))\n .pipe(lambda x:x.assign(winning_assist_turnover_ratio = x.WAst/x.WTO))\n # rename columns to prevent duplication when joining with losing stats. example: WFGM_x\n .rename(columns={\"LFGA\":\"LFGA_opp\",\"LFGM\":\"LFGM_opp\"})\n )", "def _compute_winrates(synergy, counter, heroes_released):\n for i in range(heroes_released):\n for j in range(heroes_released):\n if i != j and i != 23 and j != 23:\n if synergy['games'][i, j] != 0:\n synergy['winrate'][i, j] = synergy['wins'][i, j] / \\\n float(synergy['games'][i, j])\n\n if counter['games'][i, j] != 0:\n counter['winrate'][i, j] = counter['wins'][i, j] / \\\n float(counter['games'][i, j])", "def calculateWinRate():\n times = 10\n winRate = 0.0\n for i in range(times):\n game = Game('user', 6, 6)\n winRate += game.play(5, False, True, False, False)\n winRate = winRate/times\n print \"Winrate:\", winRate", "def get_final_coach_for_each_season(self):\n self.final_coach_for_season = (\n self.num_days_coach_for_season\n .groupby(['Season','TeamID'])\n .agg({\"CoachName\":\"count\"})\n .reset_index()\n .rename(columns={\"CoachName\":\"coach_counts\"})\n .merge(self.num_days_coach_for_season,how='left',on=['Season','TeamID'])\n .pipe(lambda x:x.assign(final_coach = np.where(x.num_season >= 0.5, x.CoachName, \"ignore\")))\n [['Season','TeamID','final_coach']]\n )", "def winrate(matches):\n if not matches:\n print('no matches')\n return None\n\n win_loss = [match['result'] for match in matches]\n return sum(win_loss)/len(win_loss)", "def marcels_players(goalie, date, df):\n # 0 = that year, 1 is year b4 ....\n marcel_weights = [.36, .29, .21, .14]\n reg_const = 2000\n reg_avg = 0 # Where to regress to\n\n # Use past 3 season to weight games played -> Just take weighted average\n gp_weights = [8, 4, 2, 0]\n\n season = int(helpers.get_season(date))\n\n weighted_goals_sum, weighted_fen_sum, weighted_xg_sum, weights_marcel_sum = 0, 0, 0, 0\n weighted_gp_sum, weights_gp_sum = 0, 0\n\n # Past 4 Seasons\n for i in range(0, 4):\n if season - i > 2006:\n # Subset from stats df\n df_goalie = df[(df['player'] == goalie) & (df['season'] == (season - i))]\n\n # Sanity Check\n if df_goalie.shape[0] > 1:\n print(\"Too many rows!!!!!!!\")\n exit()\n\n # If he played that year\n if not df_goalie.empty:\n weighted_goals_sum += df_goalie.iloc[0]['goals_a'] * marcel_weights[i]\n weighted_fen_sum += 
df_goalie.iloc[0]['fenwick_a'] * marcel_weights[i]\n weighted_xg_sum += df_goalie.iloc[0]['xg_a'] * marcel_weights[i]\n weighted_gp_sum += df_goalie.iloc[0]['games'] * gp_weights[i]\n\n # -> To divide by at end...normalize everything\n weights_marcel_sum += marcel_weights[i]\n weights_gp_sum += gp_weights[i]\n\n # Normalize weighted sums\n weighted_xg_sum = weighted_xg_sum / weights_marcel_sum if weights_marcel_sum != 0 else 0\n weighted_goals_sum = weighted_goals_sum / weights_marcel_sum if weights_marcel_sum != 0 else 0\n weighted_fen_sum = weighted_fen_sum / weights_marcel_sum if weights_marcel_sum != 0 else 0\n\n # Get Regressed fsv%\n if weighted_fen_sum != 0:\n weighted_adj_fsv = ((1 - weighted_goals_sum / weighted_fen_sum) - (1 - weighted_xg_sum / weighted_fen_sum)) * 100\n else:\n weighted_adj_fsv = 0\n reg_adj_fsv = weighted_adj_fsv - ((weighted_adj_fsv - reg_avg) * (reg_const / (reg_const + weighted_fen_sum)))\n\n # Get weighted gp\n weighted_gp_sum = weighted_gp_sum / weights_gp_sum if weights_gp_sum != 0 else 0\n\n return {'fsv': reg_adj_fsv, 'gp': weighted_gp_sum}", "def getSeasonStats(self):\n df_season_agg = self.toSeasonAggFormat()\n\n # Calculate Possessions for each game\n df_season_agg['possessions'] = 0.5 * (df_season_agg['FGA'] + 0.475 * df_season_agg['FTA'] - df_season_agg['OR'] + df_season_agg['TO']) \\\n + 0.5 * (df_season_agg['OppFGA'] + 0.475 * df_season_agg['OppFTA'] - df_season_agg['OppOR'] + df_season_agg['OppTO'])\n\n # Aggregate to Season Summary Level\n season_stats = df_season_agg.groupby(['TeamID', 'Season']).sum()\n\n season_stats = season_stats.rename(columns={'Win':'wins'})\n\n # Season Advanced Stats\n season_stats['o_eff'] = season_stats['Score'] / season_stats['possessions'] * 100\n season_stats['d_eff'] = season_stats['OppScore'] / season_stats['possessions'] * 100\n season_stats['net_eff'] = season_stats['o_eff'] - season_stats['d_eff']\n\n season_stats.drop('DayNum', axis=1, inplace=True)\n season_stats.drop('OppTeamID', axis=1, inplace=True)\n season_stats.drop('rand', axis=1, inplace=True)\n\n return season_stats", "def get_player_stats_from_game(team, year, week):", "def cumulative_stats_for_teams_each_year(self):\n self.cumulative_stats_for_team_each_year = (\n self.combine_both_winning_losing_games_stats\n .sort_values(['WTeamID','Season'])\n .groupby(['WTeamID'])\n .cumsum()\n .pipe(lambda x:x.assign(Season = self.combine_both_winning_losing_games_stats.Season.values))\n .pipe(lambda x:x.assign(TeamID = self.combine_both_winning_losing_games_stats.WTeamID.values))\n .drop(['LTeamID','win_rate'],1)\n .pipe(lambda x:x.assign(win_rate = x.winning_num_counts/(x.winning_num_counts + x.losing_num_counts)))\n .pipe(lambda x:x.assign(WFGP = x.WFGM/x.WFGA))\n .pipe(lambda x:x.assign(WFG3P = x.WFGM3/x.WFGA3))\n .pipe(lambda x:x.assign(WFTP = x.WFTM/x.WFTA))\n .pipe(lambda x:x.assign(LFGP = x.LFGM/x.LFGA))\n .pipe(lambda x:x.assign(LFG3P = x.LFGM3/x.LFGA3))\n .pipe(lambda x:x.assign(LFTP = x.LFTM/x.LFTA))\n .pipe(lambda x:x.assign(fgp = x.total_fgm/x.total_fga))\n .pipe(lambda x:x.assign(fg3p = x.total_fg3m/x.total_fg3a))\n .pipe(lambda x:x.assign(ftp = x.total_ftm/x.total_fta))\n # rebounds cumsum stats\n .pipe(lambda x:x.assign(total_def_rebounds_percent = x.total_def_rebounds/x.total_rebounds))\n .pipe(lambda x:x.assign(total_off_rebounds_percent = x.total_off_rebounds/x.total_rebounds))\n .pipe(lambda x:x.assign(total_rebound_possession_percent = x.total_off_rebounds/x.total_team_missed_attempts))\n .pipe(lambda 
x:x.assign(total_rebound_possessiongain_percent = x.total_def_rebounds/x.total_opp_team_missed_attempts))\n # assists, turnovers, steals, blocks and personal fouls\n .pipe(lambda x:x.assign(total_block_opp_FGA_percent = x.total_blocks/x.total_opp_fga))\n .pipe(lambda x:x.assign(total_assist_per_fgm = x.total_assists/x.total_fgm))\n .pipe(lambda x:x.assign(total_assist_turnover_ratio = x.total_assists/x.total_turnover))\n # win or lose by how many points\n .pipe(lambda x:x.assign(lose_rate = 1-x.win_rate))\n .pipe(lambda x:x.assign(win_score_by = x.WScore - x.losing_opponent_score))\n .pipe(lambda x:x.assign(lose_score_by = x.LScore - x.winning_opponent_score))\n .pipe(lambda x:x.assign(expectation_per_game = x.win_rate * x.win_score_by/x.winning_num_counts + x.lose_rate * x.lose_score_by/x.losing_num_counts))\n .pipe(lambda x:x.assign(avg_win_score_by = x.win_score_by/x.winning_num_counts))\n .pipe(lambda x:x.assign(avg_lose_score_by = x.lose_score_by/x.losing_num_counts))\n )", "def get_team_round_score(self, team: int, match_round: int) -> int:\n assert self.rounds, \"There are no rounds or the requested round doesn't exist\"\n\n team_score = 0\n\n if team == 1:\n for player in self.rounds[match_round - 1].team1.players:\n team_score += player.scores\n else:\n for player in self.rounds[match_round - 1].team2.players:\n team_score += player.scores\n\n return team_score", "def team_season_stats(team):\n\n # Get HTML Content\n url = 'http://www.basketball-reference.com/teams/%s/stats_per_game_totals.html' % team\n r = requests.get(url)\n soup = BeautifulSoup(r.content, \"html.parser\")\n\n # MongoDB Collection\n m = mongo.Mongo()\n\n # Team's yearly stats are displayed in a table\n season_stats = soup.find(id='stats').find('tbody')\n\n # Iterate through each year\n for year in season_stats.find_all('tr', {'class': None}):\n\n season_year = year.find('th').text[0:4]\n season_year = int(season_year) + 1\n season = {'year': season_year}\n\n # Loop through each stat\n for stat in year.find_all('td'):\n season[stat['data-stat']] = stat.string\n\n # Rename relocated teams\n season['team_id'] = scrape_utils.rename_team(season['team_id'])\n season['_id'] = season['team_id'] + '_' + str(season_year)\n\n # Remove unwanted stats\n to_remove = ['rank_team', 'foo', 'g', 'mp_per_g']\n for k in to_remove:\n season.pop(k, None)\n\n # Add to MongoDB\n m.insert('team_season', season)", "def get_winpercent(self, game: 'Game' = None, player: 'Player' = None):\n if game and player:\n pass\n elif game:\n play_count = self.play_set.filter(game=game).count()\n win_count = self.play_set.filter(winner=self, game=game).count()\n return win_count / play_count * 100\n elif player:\n pass\n # play_count = self.play_set.filter(players__in=player).count()\n # win_count = self.play_set.filter(\n # winner=self, player=player).count()\n # return win_count / play_count * 100\n else:\n return self.winpercent", "def glicko2_summarize(df, min_weeks=4, use_prior=False):\n\n # Loop through seasons and weeks to create full history of ratings by team\n results = pd.DataFrame()\n for season in df['Season'].sort_values().unique():\n for week in df[df['Season']==season]['Week'].sort_values().unique():\n if week > min_weeks:\n if week == min_weeks + 1:\n season_df = df[df['Season']==season].copy()\n uniqueteamids = pd.concat([season_df['VisID'],\n season_df['HomeID']]).unique()\n if use_prior == True and season > df['Season'].min():\n ratings = np.repeat(1500, len(uniqueteamids))\n ratingsdeviance = np.repeat(350, len(uniqueteamids))\n 
sigma = np.repeat(0.06, len(uniqueteamids))\n glicko_stats = pd.DataFrame({'ratings': ratings,\n 'ratingsdeviance': ratingsdeviance,\n 'sigma': sigma}, index=uniqueteamids)\n prior = results[results['Season']==season-1]\n prior_id_mask = [True if id in uniqueteamids else False for id in prior['TeamID']]\n prior = prior[prior_id_mask]\n prior = prior.sort_values('Week').groupby('TeamID').tail(1)\n prior = prior.drop('Week',1)\n prior = prior.set_index('TeamID')\n glicko_stats.loc[prior.index, 'ratings'] = prior['Glicko_Rating'] - (prior['Glicko_Rating'] - 1500)/2\n glicko_stats.loc[prior.index, 'ratingsdeviance'] = prior['Glicko_Rating_Deviance'] - (prior['Glicko_Rating_Deviance'] - 350)/2\n glicko_stats.loc[prior.index, 'sigma'] = prior['Glicko_Sigma'] - (prior['Glicko_Sigma'] - 0.06)/2\n else:\n ratings = np.repeat(1500, len(uniqueteamids))\n ratingsdeviance = np.repeat(350, len(uniqueteamids))\n sigma = np.repeat(0.06, len(uniqueteamids))\n glicko_stats = pd.DataFrame({'ratings': ratings,\n 'ratingsdeviance': ratingsdeviance,\n 'sigma': sigma}, index=uniqueteamids)\n\n week_df = df[(df['Season']==season) & (df['Week']<week)].copy()\n glicko_stats = glicko2(week_df, uniqueteamids, glicko_stats)\n\n\n glicko_results = glicko_stats.reset_index()\n print(glicko_results.head(), season)\n glicko_results.columns = ['TeamID','Glicko_Rating',\n 'Glicko_Rating_Deviance',\n 'Glicko_Sigma']\n glicko_results['Season'] = season\n glicko_results['Week'] = week\n results = pd.concat([results, glicko_results], axis=0,\n ignore_index=True)\n\n # Join the ratings to the original schedule of games\n df = df.merge(results, left_on=['Season','Week','HomeID'],\n right_on=['Season','Week','TeamID'],\n suffixes=('','_Home'))\n df.drop('TeamID', 1, inplace=True)\n\n df = df.merge(results, left_on=['Season','Week','VisID'],\n right_on=['Season','Week','TeamID'],\n suffixes=('','_Away'))\n df.drop('TeamID', 1, inplace=True)\n\n # Create key and set index to join with n_game summaries dataset.\n df.set_index(['HomeID', 'VisID', 'Season', 'Week'], inplace=True)\n df = df[['Glicko_Rating', 'Glicko_Rating_Deviance', 'Glicko_Sigma',\n 'Glicko_Rating_Away', 'Glicko_Rating_Deviance_Away',\n 'Glicko_Sigma_Away']]\n df.columns = ['Glicko_Rating_Home', 'Glicko_Rating_Deviance_Home',\n 'Glicko_Sigma_Home', 'Glicko_Rating_Away',\n 'Glicko_Rating_Deviance_Away', 'Glicko_Sigma_Away']\n\n return df", "def calc_winner(self):\n pass", "def rate_club(user, club):\n if not user.is_authenticated():\n return None\n if not club.posel_set.exists():\n return None\n return sum(x[1] for x in rank_in_club(user, club)) / club.posel_set.count()", "def seasonStats(personId,type = 'gameLog',group = 'hitting'):\n\n #playerInfo = get('people', {'personIds':personId})\n\n\n teamStats = get('person',{ 'ver':'v1' , 'personId':personId,'hydrate':['stats(group={},type={})'.format(group,type),'currentTeam']})\n return teamStats\n #iterate of stats and find the right player id\n #career stats broken\n #fix the season :2019\n #make function to get team id", "def get_new_ratings(players, teams):\n nb_players_team0 = len(teams[0])\n nb_players_team1 = len(teams[1])\n winner = players[teams[0][0]]\n loser = players[teams[1][0]]\n if nb_players_team0 == 1 and nb_players_team1 == 1:\n new_r1, new_r3 = rate_1vs1(winner,loser)\n elif nb_players_team0 == 1 and nb_players_team1 > 1:\n team_loser = [loser, players[teams[1][1]]]\n (new_r1), (new_r3, new_r4) = rate([winner, team_loser], ranks=[0, 1]) \n elif nb_players_team0 > 1 and nb_players_team1 == 1:\n 
team_winner = [winner, players[teams[0][1]]]\n (new_r1, new_r2), (new_r3) = rate([team_winner, loser], ranks=[0, 1]) \n else:\n team_loser = [loser, players[teams[1][1]]]\n team_winner = [winner, players[teams[0][1]]]\n (new_r1, new_r2), (new_r3, new_r4) = rate([team_winner, team_loser], ranks=[0, 1]) \n player1 = {'name': teams[0][0], 'mu': new_r1.mu, 'sigma': new_r1.sigma}\n player3 = {'name': teams[1][0], 'mu': new_r3.mu, 'sigma': new_r3.sigma}\n if nb_players_team0 > 1:\n player2 = {'name': teams[0][1], 'mu': new_r2.mu, 'sigma': new_r2.sigma}\n if nb_players_team1 > 1:\n player4 = {'name': teams[1][1], 'mu': new_r4.mu, 'sigma': new_r4.sigma}\n if nb_players_team0 > 1:\n return [player1, player2, player3, player4]\n return [player1, player2, player4]\n return [player1, player3]", "def combine_playoff_championship_for_each_coach(self):\n self.final_coach_with_postseason_champion_each_year = (\n self.final_coach_with_postseason_each_year\n .merge(self.championship_team,how='left',on=['Season','TeamID'])\n .fillna(0)\n )", "def season_game_logs(team, year):\n\n # Check year value\n if year > 2019 or year < 1950:\n raise ValueError('Year Value Incorrect')\n\n # Rename teams that moved\n team = scrape_utils.rename_team(team, year)\n\n # Get HTML content\n url = 'http://www.basketball-reference.com/teams/%s/%s/gamelog' % (team, year)\n r = requests.get(url)\n soup = BeautifulSoup(r.content, \"html.parser\")\n season_stats = soup.find(id='tgl_basic')\n games = season_stats.find('tbody')\n\n # MongoDB Collection\n m = mongo.Mongo()\n\n # To find opponent statistics\n opponent = re.compile('^opp_.*$')\n\n # Loop through every game in a team's season\n for game in games.find_all('tr', {'class': None}):\n\n curr_team = {'team': team}\n opp_team = {}\n\n # Loop through each stat\n for stat in game.find_all('td'):\n\n stat_name = stat['data-stat']\n\n # These are opponent stats\n if re.match(opponent, stat_name):\n opp_team[stat_name[4:]] = scrape_utils.stat_parse(stat_name, stat.string)\n else:\n curr_team[stat_name] = scrape_utils.stat_parse(stat_name, stat.string)\n\n # Remove unnecessary information\n del curr_team['game_season']\n del curr_team['x']\n\n # Rename relocated teams\n curr_team['team'] = scrape_utils.rename_team(team)\n opp_team['team'] = scrape_utils.rename_team(opp_team.pop('id'))\n\n # Use the same ID as basketball reference\n result = {'date': datetime.strptime(curr_team.pop('date_game'), \"%Y-%m-%d\"),\n 'season': year,\n 'result': scrape_utils.determine_home_win(curr_team['game_location'], curr_team.pop('game_result')),\n '_id': game.find('a')['href'][-17:-5]}\n\n # Place the teams in the correct spot depending on who is the home team\n if curr_team.pop('game_location') == 0:\n result['home'] = curr_team\n result['away'] = opp_team\n else:\n result['home'] = opp_team\n result['away'] = curr_team\n\n # Insert into database\n m.insert('game_log', result)", "def season_rounds(cls, season):\r\n\t\t\r\n\t\tfolder_name = cls.season_folder(season)\r\n\t\tround_list = os.listdir(f'Data/{folder_name}')\r\n\r\n\t\tall_rounds = []\r\n\r\n\t\tfor round_file in round_list:\r\n\t\t\twith open(f'Data/{folder_name}/{round_file}', 'r', encoding='utf-8') as f:\r\n\t\t\t\tround_info = f.read().splitlines()\r\n\r\n\t\t\tround_number = round_file[:-4]\r\n\t\t\tfull_round_name = f\"{season} R{round_number}\"\r\n\r\n\t\t\tround_date = int(round_info[0])\r\n\t\t\tlookup_ind = DATES.month_diff(round_date, DATES.MIN_DATE)\r\n\r\n\t\t\t# If the round isn't actually counted for TWOW Glicko\r\n\t\t\tif 
full_round_name not in cls.ROUNDS[lookup_ind].keys():\r\n\t\t\t\tcontinue\r\n\r\n\t\t\tcontestant_count = len(round_info) - 1\r\n\r\n\t\t\tstrength = cls.ROUNDS[lookup_ind][full_round_name][0]\r\n\r\n\t\t\tall_rounds.append([\r\n\t\t\t\tround_number,\r\n\t\t\t\tround_date,\r\n\t\t\t\tcontestant_count,\r\n\t\t\t\tstrength\r\n\t\t\t])\r\n\t\t\r\n\t\treturn all_rounds", "def get_tourney_rounds(self, conference, year):\n ts_dict = self.get_tourney_slots()\n seed_dict = self.get_tourney_seeds()\n tr_dict = self.get_tourney_results()\n \n round_1 = list()\n round_2 = list()\n round_3 = list()\n round_4 = list()\n winner = list()\n \n round1_winners = list()\n for seed, team in seed_dict[year].items():\n for winning, losing in tr_dict[year]:\n if team == winning and conference in seed:\n round1_winners.append(seed[1:])\n #removes duplicates because I did this part weirdly... HEHEH\n round1_winners = list(set(round1_winners))\n\n win_counter = defaultdict(int)\n for seed, team in seed_dict[year].items(): \n for winning, losing in tr_dict[year]:\n if team == winning and conference in seed:\n win_counter[winning] += 1\n \n for slot, matchup in ts_dict[year].items():\n \n if conference in slot and \"R1\" in slot: \n round_1.append(\"{}-{}\".format(matchup[1:3], matchup[-2:]))\n round_1 = sorted(round_1)\n #for match in round_1:\n for winner1 in round1_winners:\n if winner1 in round_1[0]:\n for winner2 in round1_winners:\n if winner2 in round_1[-1]:\n round_2.append(\"{}-{}\".format(winner1, winner2))\n if winner1 in round_1[1]:\n for winner2 in round1_winners:\n if winner2 in round_1[-2]:\n round_2.append(\"{}-{}\".format(winner1, winner2))\n if winner1 in round_1[2]:\n for winner2 in round1_winners:\n if winner2 in round_1[-3]:\n round_2.append(\"{}-{}\".format(winner1, winner2))\n if winner1 in round_1[3]:\n for winner2 in round1_winners:\n if winner2 in round_1[-4]:\n round_2.append(\"{}-{}\".format(winner1, winner2))\n round_2 = sorted(round_2)\n\n round2_winners = list()\n for seed, team in seed_dict[year].items():\n for team2, count in win_counter.items():\n if team == team2 and count > 1:\n round2_winners.append(seed[1:])\n \n for winner1 in round2_winners:\n if winner1 in round_2[0]:\n for winner2 in round2_winners:\n if winner2 in round_2[-1]:\n round_3.append(\"{}-{}\".format(winner1, winner2))\n if winner1 in round_2[1]:\n for winner2 in round2_winners:\n if winner2 in round_2[-2]:\n round_3.append(\"{}-{}\".format(winner1, winner2))\n round_3 = sorted(round_3)\n\n round3_winners = list()\n for seed, team in seed_dict[year].items():\n for team2, count in win_counter.items():\n if team == team2 and count > 2:\n round3_winners.append(seed[1:])\n\n for winner1 in round3_winners:\n if winner1 in round_3[0]:\n for winner2 in round3_winners:\n if winner2 in round_3[-1]:\n round_4.append(\"{}-{}\".format(winner1, winner2))\n round_4 = sorted(round_4)\n\n for seed, team in seed_dict[year].items():\n for team2, count in win_counter.items():\n if team == team2 and count > 3:\n winner.append(seed[1:])\n\n conferences = {\"W\": \"East\", \"X\": \"Midwest\", \"Y\": \"South\", \"Z\": \"West\"}\n\n #print(\"CONFERENCE: {}, YEAR: {}\".format(conferences[conference], year))\n #print(\"ROUND1:\", round_1)\n #print(\"ROUND2:\", round_2)\n #print(\"ROUND3:\", round_3)\n #print(\"ROUND4:\", round_4)\n #print(\"WINNER:\", winner)\n\n #clearing out the tourney results dictionary\n #tr_dict.clear()\n\n return round_1, round_2, round_3, round_4, winner", "def assign_win_points(game_dict):\n def 
win_tuple(deck_dict):\n \"\"\" Return tuple ordered by increasing final standing. \"\"\"\n # negate turns so that max() behaves; points good, turns bad.\n num_normal_turns = sum(not ( (POSSESSION in t and t[POSSESSION]) or \\\n (OUTPOST in t and t[OUTPOST]))\n for t in deck_dict[TURNS])\n return (deck_dict[POINTS], -num_normal_turns)\n\n decks = game_dict[DECKS]\n winner_tuple = max(win_tuple(p) for p in decks)\n winners = [p for p in decks if win_tuple(p) == winner_tuple]\n\n win_points = float(len(decks)) / len(winners)\n for player in decks:\n player[WIN_POINTS] = win_points if player in winners else 0.0", "def compute_score(window, computer_piece):\n score = 0\n if window.count(computer_piece) == 4:\n score += 100\n elif window.count(computer_piece) == 3 and window.count(0) == 1:\n score += 5\n elif window.count(computer_piece) == 2 and window.count(0) == 2:\n score += 2\n if window.count(PLAYER_PIECE) == 2 and window.count(0) == 2:\n score -= 1\n if window.count(PLAYER_PIECE) == 3 and window.count(0) == 1:\n score -= 100\n return score", "def get_smmry_data(soup, game_dict):\n\n # Get date and time data.\n try:\n date_soup = soup.find(\"div\", {\"class\": \"spielbericht_tipp_status\"})\n league = date_soup.div.span.text\n date_string = date_soup.div.text\n date = re.search(r'\\d{2}.\\d{2}.\\d{2}', date_string).group(0)\n time = re.search(r'\\d{2}:\\d{2}', date_string).group(0)\n matchday = re.search(r'[|]\\d+', date_string).group(0)[1:]\n\n game_dict[\"league\"] = league\n game_dict[\"fb_date\"] = date\n game_dict[\"fb_time\"] = time\n game_dict[\"matchday\"] = matchday\n except AttributeError:\n pass\n\n # Get game result.\n try:\n result = soup.find(\"div\", {\"class\": \"stand\"}).text\n game_dict[\"result\"] = result\n except AttributeError:\n pass\n\n # Try to get the referee name.\n try:\n referee = soup.find(\"span\", {\"class\": \"schiri_link\"}).text\n game_dict[\"referee\"] = referee\n except AttributeError:\n pass\n\n # Get team, club name and repective url by team.\n try:\n smmry_soup = soup.find(\n \"div\", {\"class\": \"spielbericht_ergebnis_wrapper\"})\n club_title = smmry_soup.find_all(\"img\")\n team_title = smmry_soup.findAll(\"div\", {\"class\": \"teaminfo\"})\n\n # Loop through teams.\n for j, team in enumerate([\"home_\", \"away_\"]):\n game_dict[team + \"team\"] = team_title[j].a[\"title\"]\n game_dict[team + \"team_url\"] = team_title[j].a[\"href\"]\n game_dict[team + \"club\"] = club_title[j][\"title\"]\n except (AttributeError, TypeError):\n pass\n\n return game_dict", "def winRate(DF):\r\n df = DF[\"return\"]\r\n pos = df[df>1]\r\n neg = df[df<1]\r\n return (len(pos) / len(pos + neg)) * 100", "def games(self, competition_id: int, season_id: int) -> DataFrame[Any]:", "def get_result(state, winrate_predictor):\n teamA_picks = state[:, TEAM_A_PICK_INDICES]\n teamB_picks = state[:, TEAM_B_PICK_INDICES]\n team_comp = torch.cat((teamA_picks, teamB_picks), dim=1)\n winrate = winrate_predictor(team_comp)[0, 0]\n \n if winrate >= 0.5:\n return 0\n return 1" ]
[ "0.7711851", "0.6832915", "0.6480246", "0.63182116", "0.6245573", "0.61112136", "0.611068", "0.602987", "0.59854066", "0.5965334", "0.5964837", "0.57526016", "0.57137907", "0.5671769", "0.5644969", "0.5635259", "0.56290424", "0.5599348", "0.5596914", "0.5574907", "0.5569233", "0.5504836", "0.5483212", "0.54613936", "0.54592854", "0.54412836", "0.5427208", "0.54221195", "0.54116994", "0.5408788" ]
0.8241553
0
Test that an uploaded Logger Type file without a microsite_id will not be inserted into the database
def test_logger_type_upload_MicrositeId_None(self): test_filename = 'server/tests/test_data_files/Test/Test_New_Logger_Type_MicrositeId_None.csv' with self.app.test_client() as client: with client.session_transaction() as sess: sess['logged_in'] = True response = client.post('/upload', data={ 'loggerTypeFile': (open(test_filename, 'rb'), 'Test_New_Logger_Type_MicrositeId_None.csv') }, follow_redirects=True) query = ("SELECT * from cnx_logger_biomimic_type where biomimic_type='DummyBiomimicTypeNone'") cursor = self.db.connection.cursor() cursor.execute(query) results = cursor.fetchall() results = list(results) self.assertEqual(len(results), 0)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upload_wrong_file_type(self):\n file = dict(\n file=(BytesIO(b'my file contents'), \"foto.doc\"),\n )\n response = self.client.post('/upload',\n content_type='multipart/form-data',\n data=file)\n self.assertEqual(status.HTTP_400_BAD_REQUEST, response.status_code)\n if os.path.exists(PHOTOS_SAVE_PATH):\n self.assertNotIn('foto.jpg', os.listdir(PHOTOS_SAVE_PATH))", "def test_upload_file(self):\n pass", "def test_upload_file1(self):\n pass", "def test_file_upload_file_with_the_same_name_already_exists(\n staff_api_client, media_root, site_settings\n):\n # given\n image_file1, image_name1 = create_image()\n path = default_storage.save(image_file1._name, image_file1)\n\n image_file, image_name = create_image()\n assert image_file1 != image_file\n assert image_name == image_name1\n assert image_file._name == image_file1._name\n\n variables = {\"image\": image_name}\n body = get_multipart_request_body(\n FILE_UPLOAD_MUTATION, variables, image_file, image_name\n )\n\n # when\n response = staff_api_client.post_multipart(body)\n\n # then\n content = get_graphql_content(response)\n data = content[\"data\"][\"fileUpload\"]\n errors = data[\"errors\"]\n\n domain = site_settings.site.domain\n assert not errors\n assert data[\"uploadedFile\"][\"contentType\"] == \"image/png\"\n file_url = data[\"uploadedFile\"][\"url\"]\n assert file_url != f\"http://{domain}/media/{image_file._name}\"\n assert file_url != f\"http://{domain}/media/{path}\"\n assert default_storage.exists(file_url.replace(f\"http://{domain}/media/\", \"\"))", "def test_local_uploader_upload_wrong_file(self, mock):\r\n mock.save.return_value = None\r\n u = LocalUploader()\r\n file = FileStorage(filename='test.txt')\r\n res = u.upload_file(file, container='user_3')\r\n err_msg = (\"Upload file should return False, \\\r\n as this extension is not allowed\")\r\n assert res is False, err_msg", "def test_upload_bad_file(self):\n url = image_upload_url(self.reteta.id)\n res = self.client.post(url, {'image': 'notimage'}, format='multipart')\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)", "def test_empty_upload(self):\r\n self._login_admin()\r\n\r\n res = self.app.post(\r\n '/admin/import',\r\n params={'api_key': self.api_key},\r\n upload_files=[],\r\n )\r\n self.assertTrue(\r\n 'Please provide a file to import' in res.body,\r\n \"Error message should be present\")", "def test_file_field():", "def test_local_uploader_upload_correct_file(self, mock):\r\n mock.save.return_value = None\r\n u = LocalUploader()\r\n file = FileStorage(filename='test.jpg')\r\n res = u.upload_file(file, container='user_3')\r\n err_msg = (\"Upload file should return True, \\\r\n as this extension is allowed\")\r\n assert res is True, err_msg", "def test_upload(self):\n with self.client:\n file = dict(\n file=(BytesIO(b'my file contents'), \"foto.jpg\"),\n )\n response = self.client.post('/upload',\n content_type='multipart/form-data',\n data=file)\n self.assertEqual(status.HTTP_200_OK, response.status_code)\n self.assertIn('foto.jpg', os.listdir(PHOTOS_SAVE_PATH))\n self.assertIn('foto.jpg', [photo.filename for photo in Photo.query.all()])", "def test_missing_extension(client: FlaskClient):\n file = get_example_file(ExampleFileType.Txt)\n file.filename = \"test\"\n response = util.upload_file(client, DEFAULT_USER, file)\n assert response.status == \"400 BAD REQUEST\"", "def test_logger_temperature_upload_duplicate(self):\n test_type_filename = 'server/tests/test_data_files/Test/Test_New_Logger_Type_Positive.csv'\n test_temp_filename = 
'server/tests/test_data_files/Test/temp_files/DUMMYID_2000_pgsql_Duplicate.txt'\n with self.app.test_client() as client:\n with client.session_transaction() as sess:\n sess['logged_in'] = True\n response = client.post('/upload', \n data={\n 'loggerTypeFile': (open(test_type_filename, 'rb'), 'Test_New_Logger_Type_Positive.csv')\n }, follow_redirects=True)\n response = client.post('/upload', \n data={\n 'loggerTempFile': (open(test_temp_filename, 'rb'), 'DUMMYID_2000_pgsql_Duplicate.txt')\n }, follow_redirects=True)\n record_type = {\n \"microsite_id\" : \"DUMMYID\",\n \"site\" : \"DUMMYSITE\",\n \"biomimic_type\" : \"Dummybiomimictype\",\n \"country\" : \"Dummycountry\",\n \"state_province\" : \"Dummystate\",\n \"location\" : \"Dummylocation\",\n \"field_lat\" : \"36.621933330000\",\n \"field_lon\" : \"-121.905316700000\",\n \"zone\" : \"DummyZone\",\n \"sub_zone\" : \"DummySubZone\",\n \"wave_exp\" : \"DummyWave\",\n \"start_date\": str(datetime.strptime(\"7/1/2000\",'%m/%d/%Y').date()),\n \"end_date\": str(datetime.strptime(\"7/2/2000\",'%m/%d/%Y').date())} \n where_condition = self.db.build_where_condition(record_type)\n query = (\"SELECT temp.Time_GMT, temp.Temp_C \"\n \"FROM `cnx_logger` logger \"\n \"INNER JOIN `cnx_logger_biomimic_type` biotype ON biotype.`biomimic_id` = logger.`biomimic_id` \"\n \"INNER JOIN `cnx_logger_geographics` geo ON geo.`geo_id` = logger.`geo_id` \"\n \"INNER JOIN `cnx_logger_properties` prop ON prop.`prop_id` = logger.`prop_id` \"\n \"INNER JOIN `cnx_logger_temperature` temp ON temp.`logger_id` = logger.`logger_id` \")\n cursor = self.db.connection.cursor()\n cursor.execute(query + where_condition)\n results = cursor.fetchall()\n results = list(results)\n self.cleanUpLoggerTemp(cursor)\n self.cleanUpLoggerType(cursor, record_type) \n cursor.close()\n self.assertEqual(len(results), 1)", "def test_should_file_field(self):\n self.assertIn(\"image\", self.fields)", "def test_upload_file(self):\n\n uploadFile = os.path.join(testdatadir, \"upload.data\")\n r = gracedb.writeFile(eventId, uploadFile)\n self.assertEqual(r.status, 201) # CREATED\n r_content = r.json()\n link = r_content['permalink']\n\n self.assertEqual(\n open(uploadFile, 'r').read(),\n gracedb.get(gracedb.files(eventId).json()['upload.data']).read()\n )\n\n self.assertEqual(\n open(uploadFile, 'r').read(),\n gracedb.get(link).read()\n )\n\n # Re-upload slightly different file.\n uploadFile2 = os.path.join(testdatadir, \"upload2.data\")\n r = gracedb.writeFile(\n eventId,\n filename=\"upload.data\",\n filecontents=open(uploadFile2, 'r'))\n self.assertEqual(r.status, 201) # CREATED\n r_content = r.json()\n link2 = r_content['permalink']\n\n self.assertEqual(\n open(uploadFile2, 'r').read(),\n gracedb.get(gracedb.files(eventId).json()['upload.data']).read()\n )\n\n self.assertEqual(\n open(uploadFile2, 'r').read(),\n gracedb.get(link2).read()\n )\n\n self.assertNotEqual(link, link2)", "def upload_build(self, name, directory):\n logging.info('Not uploading build because no Filestore.')", "def test_no_mimetype(self):\n field = TypedFileField(required=False, type_whitelist=self.good_types, use_magic=False)\n\n for t in self.good_types:\n name = 'somefooname'\n file = UploadedFile(name=name, size=1, content_type=t)\n del file.content_type\n with pytest.raises(forms.ValidationError):\n field.clean(file)", "def test_upload_microbe_directory(self, auth_headers, *_):\n sample = Sample(name='SMPL_Microbe_Directory_01').save()\n sample_uuid = str(sample.uuid)\n with self.client:\n response = self.client.post(\n 
f'/api/v1/samples/{sample_uuid}/microbe_directory_annotate',\n headers=auth_headers,\n data=json.dumps(TEST_DIRECTORY),\n content_type='application/json',\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertIn('success', data['status'])\n for field in TEST_DIRECTORY:\n self.assertIn(field, data['data'])\n\n # Reload object to ensure microbe directory result was stored properly\n sample = Sample.objects.get(uuid=sample_uuid)\n self.assertTrue(sample.microbe_directory_annotate)", "def test_create_model_without_file(test_client, dataset):\n response = test_client.post('/create?target=Species',\n data={\"file\": \"No file\"},\n content_type=\"multipart/form-data\")\n assert response.status_code == 400", "def test_local_uploader_upload_fails(self, mock):\r\n u = LocalUploader()\r\n file = FileStorage(filename='test.jpg')\r\n res = u.upload_file(file, container='user_3')\r\n err_msg = (\"Upload file should return False, \\\r\n as there is an exception\")\r\n assert res is False, err_msg", "def upload_coverage(self, name, directory):\n logging.info('Not uploading coverage because no Filestore.')", "def test_upload_job_description_file_post(self):\n pass", "def test_api_create_unknown_file(api):\n api.create_article(\n {\"title\": \"A Title\"},\n {\"key1\": \"value1\"},\n {'test.zip': 'FFFDASFAFADADFA'},\n )\n req_call = requests.request\n req_kw = req_call.call_args[1]\n data_lines = req_kw['data'].split(b'\\r\\n')\n # Unknown file type is skipped\n assert len(data_lines) == 11", "def test_upload_manifest_non_existing_trial_id(\n cidc_api, some_file, clean_db, monkeypatch\n):\n user_id = setup_trial_and_user(cidc_api, monkeypatch)\n make_admin(user_id, cidc_api)\n\n mocks = UploadMocks(monkeypatch, prismify_trial_id=\"test-non-existing-trial-id\")\n\n client = cidc_api.test_client()\n\n res = client.post(MANIFEST_UPLOAD, data=form_data(\"pbmc.xlsx\", some_file, \"pbmc\"))\n assert res.status_code == 400\n assert \"test-non-existing-trial-id\" in str(res.json[\"_error\"][\"message\"])\n\n # Check that we tried to upload the excel file\n mocks.upload_xlsx.assert_not_called()\n mocks.iter_errors.assert_called_once()\n mocks.prismify.assert_called_once()", "def test_upload_empty(self):\n rv = self.post(url='/queue/', content=None, token=self.user_token)\n self.assertJSONError(rv, 'TagalleryMissingFile')\n return", "def test_no_mimetype_magic(self, mock_get_content_type):\n mock_get_content_type.side_effect = ValueError\n\n field = TypedFileField(required=False, type_whitelist=self.good_types)\n\n for t in self.good_types:\n name = 'somefooname'\n file = UploadedFile(name=name, size=1, content_type=t)\n with pytest.raises(forms.ValidationError):\n field.clean(file)", "def test_invalid_file_type(self):\n file = SimpleUploadedFile(\"test.csv\", b\"\\xe8\")\n form = MomConnectImportForm(\n data={\"source\": \"MomConnect Import\"}, files={\"file\": file}\n )\n self.assertTrue(form.is_valid())\n instance = form.save()\n self.assertEqual(instance.status, MomConnectImport.Status.ERROR)\n [error] = instance.errors.all()\n self.assertEqual(error.error, \"File is not a CSV\")", "def test_file_upload(self):\n\n with tempfile.NamedTemporaryFile() as test_file:\n test_file.write(\n u'date,category,employee name,employee address,expense description,pre-tax amount,tax name,tax amount\\n')\n test_file.write(\n u'12/1/2013,Travel,Don Draper,\"783 Park Ave, New York, NY 10021\",Taxi ride, 350.00 ,NY Sales tax, 31.06\\n')\n test_file.flush()\n response = 
self.send_file_upload_request(view_name='csv_import_view', filename=test_file.name)\n\n actual_import_logs = ImportLog.objects.all()\n self.assertEqual(1, len(actual_import_logs))\n\n actual_import_log = actual_import_logs[0]\n expected_file_name = os.path.basename(test_file.name)\n self.assertEqual(expected_file_name, actual_import_log.file_name)\n\n expense_items = ExpenseItem.objects.all()\n self.assertEqual(1, len(expense_items))\n self.assertEqual('Don Draper', expense_items[0].employee.name)\n\n self.assertEqual('{\"upload_id\": 1}', response.content)", "def upload_crashes(self, name, directory):\n logging.info('Not uploading crashes because no Filestore.')", "def test_upload_duplicate(client: FlaskClient):\n file = get_example_file(ExampleFileType.Png)\n response1 = util.upload_file(client, DEFAULT_USER, file)\n response2 = util.upload_file(client, DEFAULT_USER, file)\n\n assert response1.status == \"201 CREATED\"\n assert response2.status == \"200 OK\"\n assert response1.json == response2.json", "def test_NoFileUploaded_NoRedirectionAfterSubmint(self):\n\n self.open(config.url)\n self.click(config.submit_file)\n self.assert_element_present('#send-title')" ]
[ "0.676866", "0.66889274", "0.65706724", "0.64305943", "0.640982", "0.639273", "0.6387645", "0.63190717", "0.62991345", "0.6276941", "0.62692577", "0.62305576", "0.6219944", "0.6209555", "0.6150867", "0.61412674", "0.6103418", "0.6050772", "0.602671", "0.6003001", "0.5965992", "0.5957162", "0.5927887", "0.59203917", "0.5907153", "0.5901281", "0.58993", "0.5894678", "0.5893892", "0.58770597" ]
0.81712246
0
Test that a Logger Temperature file with a duplicate entry cannot be uploaded
def test_logger_temperature_upload_duplicate(self): test_type_filename = 'server/tests/test_data_files/Test/Test_New_Logger_Type_Positive.csv' test_temp_filename = 'server/tests/test_data_files/Test/temp_files/DUMMYID_2000_pgsql_Duplicate.txt' with self.app.test_client() as client: with client.session_transaction() as sess: sess['logged_in'] = True response = client.post('/upload', data={ 'loggerTypeFile': (open(test_type_filename, 'rb'), 'Test_New_Logger_Type_Positive.csv') }, follow_redirects=True) response = client.post('/upload', data={ 'loggerTempFile': (open(test_temp_filename, 'rb'), 'DUMMYID_2000_pgsql_Duplicate.txt') }, follow_redirects=True) record_type = { "microsite_id" : "DUMMYID", "site" : "DUMMYSITE", "biomimic_type" : "Dummybiomimictype", "country" : "Dummycountry", "state_province" : "Dummystate", "location" : "Dummylocation", "field_lat" : "36.621933330000", "field_lon" : "-121.905316700000", "zone" : "DummyZone", "sub_zone" : "DummySubZone", "wave_exp" : "DummyWave", "start_date": str(datetime.strptime("7/1/2000",'%m/%d/%Y').date()), "end_date": str(datetime.strptime("7/2/2000",'%m/%d/%Y').date())} where_condition = self.db.build_where_condition(record_type) query = ("SELECT temp.Time_GMT, temp.Temp_C " "FROM `cnx_logger` logger " "INNER JOIN `cnx_logger_biomimic_type` biotype ON biotype.`biomimic_id` = logger.`biomimic_id` " "INNER JOIN `cnx_logger_geographics` geo ON geo.`geo_id` = logger.`geo_id` " "INNER JOIN `cnx_logger_properties` prop ON prop.`prop_id` = logger.`prop_id` " "INNER JOIN `cnx_logger_temperature` temp ON temp.`logger_id` = logger.`logger_id` ") cursor = self.db.connection.cursor() cursor.execute(query + where_condition) results = cursor.fetchall() results = list(results) self.cleanUpLoggerTemp(cursor) self.cleanUpLoggerType(cursor, record_type) cursor.close() self.assertEqual(len(results), 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_upload_duplicate(client: FlaskClient):\n file = get_example_file(ExampleFileType.Png)\n response1 = util.upload_file(client, DEFAULT_USER, file)\n response2 = util.upload_file(client, DEFAULT_USER, file)\n\n assert response1.status == \"201 CREATED\"\n assert response2.status == \"200 OK\"\n assert response1.json == response2.json", "def test_upload_duplicate(self):\n # Attempt to upload a duplicate\n with self.assertRaises(trovebox.TroveboxDuplicateError):\n self.client.photo.upload(\"tests/data/test_photo1.jpg\",\n title=self.TEST_TITLE)\n\n # Check there are still three photos\n self.photos = self.client.photos.list()\n self.assertEqual(len(self.photos), 3)", "def test_duplicate_entries(self):", "def test_upload_duplicate_file_create(self):\n url = reverse('content-list')\n with tempfile.NamedTemporaryFile(suffix='.txt') as content_file:\n content_file.write(b\"The contents of the temporary file.\\n\")\n content_file.seek(0)\n data = {\n 'name': 'Content File',\n 'description': 'File 1',\n 'content_file': content_file,\n 'updated_time': date.today(),\n 'creators': [],\n 'coverage': '',\n 'subjects': [],\n 'keywords': [],\n 'workareas': [],\n 'language': '',\n 'cataloger': ''\n }\n response = self.client.post(url, data, format='multipart')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n self.assertEqual(Content.objects.count(), 1)\n first_content = Content.objects.first()\n\n # Duplicate File.\n with tempfile.NamedTemporaryFile(suffix='.txt') as content_file:\n content_file.write(b\"The contents of the temporary file.\\n\")\n content_file.seek(0)\n data = {\n 'name': 'Content File',\n 'description': 'File 1',\n 'content_file': content_file,\n 'updated_time': date.today(),\n 'creators': [],\n 'coverage': '',\n 'subjects': [],\n 'keywords': [],\n 'workareas': [],\n 'language': '',\n 'cataloger': ''\n }\n response = self.client.post(url, data, format='multipart')\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)\n response_payload = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(response_payload['result'], 'error')\n self.assertEqual(response_payload['error'], 'DUPLICATE_FILE_UPLOADED')\n self.assertRegex(\n response_payload['existing_content']['content_url'],\n '%s$' % reverse('content-detail', args=[first_content.pk])\n )\n self.assertRegex(\n response_payload['existing_content']['file_url'],\n '%s$' % first_content.content_file.url\n )", "def test_fileAlreadyExistsNoOverwrite(self):\n fp = FilePath(self.mktemp())\n fp.touch()\n\n self.assertRaises(OSError, self.makeConnectedDccFileReceive, fp.path)", "def test_existing_file_name(self):\n\t\ttp = self.sess.query(sql.Post).filter(sql.Post.reddit_id == 't3_ahal9v').first()\n\t\tfile = ng.choose_file_name(tp.urls[0], tp, sql.session(), album_size=1)\n\t\tself.assertTrue(file.endswith(' - 2'), msg='Failed to increment duplicate post!')", "def test_add1(self):\n try:\n TempfileManager.add_tempfile(tempdir + 'add1')\n self.fail(\"Expected IOError because file 'add1' does not exist\")\n except IOError:\n pass", "def test_duplicate_file_on_create(self):\n first_value = {\n \"name\": \"Content 1\",\n \"description\": \"Content's Description\",\n \"content_file\": SimpleUploadedFile(\n \"uploaded_file_name\", \"This will be the contents of the uploaded file.\".encode()\n ),\n \"updated_time\": timezone.now()\n }\n content1 = Content(**first_value)\n content1.content_file_uploaded = True\n content1.save()\n content1.content_file.close()\n\n second_value = {\n \"name\": \"Content 
2\",\n \"description\": \"Content's Description\",\n \"content_file\": SimpleUploadedFile(\n \"uploaded_file_name_2\", \"This will be the contents of the uploaded file.\".encode()\n ),\n \"updated_time\": timezone.now()\n }\n content2 = Content(**second_value)\n content2.content_file_uploaded = True\n with self.assertRaises(DuplicateContentFileException) as cm:\n content2.save()\n self.assertEqual(cm.exception.content.pk, content1.pk)\n content2.content_file.close()", "def test_error_noted_in_response_if_meter_has_overlapping_readings(self):\n dup_import_record = ImportRecord.objects.create(owner=self.user, last_modified_by=self.user, super_organization=self.org)\n dup_filename = \"example-pm-monthly-meter-usage-1-dup.xlsx\"\n dup_filepath = os.path.dirname(os.path.abspath(__file__)) + \"/../data_importer/tests/data/\" + dup_filename\n\n dup_file = ImportFile.objects.create(\n import_record=dup_import_record,\n source_type=SEED_DATA_SOURCES[PORTFOLIO_METER_USAGE][1],\n uploaded_filename=dup_filename,\n file=SimpleUploadedFile(\n name=dup_filename,\n content=pathlib.Path(dup_filepath).read_bytes()\n ),\n cycle=self.cycle\n )\n\n url = reverse(\"api:v3:import_files-start-save-data\", args=[dup_file.id])\n url += f'?organization_id={self.org.pk}'\n post_params = {\n 'cycle_id': self.cycle.pk,\n }\n response = self.client.post(url, post_params)\n\n total_meters_count = Meter.objects.count()\n\n result_summary = json.loads(response.content)\n\n expected_import_summary = [\n {\n \"property_id\": self.property_1.id,\n \"cycles\": self.cycle.name,\n \"pm_property_id\": \"5766973\",\n \"source_id\": \"5766973-0\",\n \"type\": \"Electric - Grid\",\n \"incoming\": 2,\n \"successfully_imported\": 2,\n \"errors\": \"\",\n },\n {\n \"property_id\": self.property_1.id,\n \"cycles\": self.cycle.name,\n \"pm_property_id\": \"5766973\",\n \"source_id\": \"5766973-1\",\n \"type\": \"Natural Gas\",\n \"incoming\": 2,\n \"successfully_imported\": 2,\n \"errors\": \"\",\n },\n {\n \"property_id\": self.property_2.id,\n \"cycles\": self.cycle.name,\n \"pm_property_id\": \"5766975\",\n \"source_id\": \"5766975-0\",\n \"type\": \"Electric - Grid\",\n \"incoming\": 4,\n \"successfully_imported\": 0,\n \"errors\": \"Overlapping readings.\",\n },\n {\n \"property_id\": self.property_2.id,\n \"cycles\": self.cycle.name,\n \"pm_property_id\": \"5766975\",\n \"source_id\": \"5766975-1\",\n \"type\": \"Natural Gas\",\n \"incoming\": 4,\n \"successfully_imported\": 0,\n \"errors\": \"Overlapping readings.\",\n },\n ]\n\n self.assertCountEqual(result_summary['message'], expected_import_summary)\n self.assertEqual(total_meters_count, 2)", "def hash_check_files(self):\n temp_error = 0\n if not self.hash_log_curr:\n self.hash_log_curr = self.hash_curr_files\n else:\n for key, value in self.hash_curr_files.iteritems():\n if key in self.hash_log_curr:\n #test for valid hash\n if self.valid is not None:\n #test any valid hahses are given\n if key in self.valid:\n # a hash code that is ok to duplicate\n self.print_to_log('Valid Duplicate HashCode, skipping: ' + value[5])\n self.hash_log_curr[key][3] = str(int(self.hash_log_curr[key][3]) + 1)\n self.hash_log_curr[key][4] = value[4]\n continue\n # not valid duplicate hash\n # a dupulicate hash found which is a failure and should abort import\n self.hash_log_curr[key][0] = 'Fail'\n self.hash_log_curr[key][3] = str(int(self.hash_log_curr[key][3]) + 1)\n self.hash_log_curr[key][4] = value[4]\n self.hash_log_curr[key][5] += ', ' + value[5]\n self.print_to_log('Duplicate hash found for 
file: ' + value[5])\n temp_error = 1\n else:\n #a new hash, no issues\n self.hash_log_curr[key] = value\n self.print_to_log('New Hash for file: ' + value[5])\n self.error = temp_error", "def test_upload_file(self):\n\n uploadFile = os.path.join(testdatadir, \"upload.data\")\n r = gracedb.writeFile(eventId, uploadFile)\n self.assertEqual(r.status, 201) # CREATED\n r_content = r.json()\n link = r_content['permalink']\n\n self.assertEqual(\n open(uploadFile, 'r').read(),\n gracedb.get(gracedb.files(eventId).json()['upload.data']).read()\n )\n\n self.assertEqual(\n open(uploadFile, 'r').read(),\n gracedb.get(link).read()\n )\n\n # Re-upload slightly different file.\n uploadFile2 = os.path.join(testdatadir, \"upload2.data\")\n r = gracedb.writeFile(\n eventId,\n filename=\"upload.data\",\n filecontents=open(uploadFile2, 'r'))\n self.assertEqual(r.status, 201) # CREATED\n r_content = r.json()\n link2 = r_content['permalink']\n\n self.assertEqual(\n open(uploadFile2, 'r').read(),\n gracedb.get(gracedb.files(eventId).json()['upload.data']).read()\n )\n\n self.assertEqual(\n open(uploadFile2, 'r').read(),\n gracedb.get(link2).read()\n )\n\n self.assertNotEqual(link, link2)", "def upload_crashes(self, name, directory):\n logging.info('Not uploading crashes because no Filestore.')", "def testUniqueFileIDs(self):\n fileID = 'f' * 64\n self.store.add(OpaqueValue(fileID, 'content'))\n self.store.flush()\n self.store.add(OpaqueValue(fileID, 'content'))\n self.assertRaises(IntegrityError, self.store.flush)\n self.store.rollback()", "def test_log_filenames_file_not_found(self):\n\n filename = '/%s' % (uuid.uuid4())\n while os.path.exists(filename): # pragma: no cover\n filename = '/%s' % (uuid.uuid4())\n\n with self.assertRaises(Exception):\n self.app.log_filenames([filename])\n self.assertEqual(self.get_track_count(), 0)", "def test_upload_duplicate_file_upload(self):\n url = reverse('content-list')\n with tempfile.NamedTemporaryFile(suffix='.txt') as content_file:\n content_file.write(b\"The contents of the temporary file.\\n\")\n content_file.seek(0)\n data = {\n 'name': 'Content File',\n 'description': 'File 1',\n 'content_file': content_file,\n 'updated_time': date.today(),\n 'creators': [],\n 'coverage': '',\n 'subjects': [],\n 'keywords': [],\n 'workareas': [],\n 'language': '',\n 'cataloger': ''\n }\n response = self.client.post(url, data, format='multipart')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n self.assertEqual(Content.objects.count(), 1)\n first_content = Content.objects.first()\n\n with tempfile.NamedTemporaryFile(suffix='.txt') as content_file:\n content_file.write(b\"The contents of the temporary file 2.\\n\")\n content_file.seek(0)\n data = {\n 'name': 'Content File 2',\n 'description': 'File 2',\n 'content_file': content_file,\n 'updated_time': date.today(),\n 'creators': [],\n 'coverage': '',\n 'subjects': [],\n 'keywords': [],\n 'workareas': [],\n 'language': '',\n 'cataloger': ''\n }\n response = self.client.post(url, data, format='multipart')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n\n self.assertEqual(Content.objects.count(), 2)\n second_content = Content.objects.filter(name='Content File 2').first()\n\n # Duplicate File.\n with tempfile.NamedTemporaryFile(suffix='.txt') as content_file:\n content_file.write(b\"The contents of the temporary file.\\n\")\n content_file.seek(0)\n data = {\n 'content_file': content_file,\n }\n\n url = reverse('content-detail', args=[second_content.pk])\n response = self.client.patch(url, data, 
format='multipart')\n self.assertEqual(response.status_code, status.HTTP_409_CONFLICT)\n response_payload = json.loads(response.content.decode(\"utf-8\"))\n self.assertEqual(response_payload['result'], 'error')\n self.assertEqual(response_payload['error'], 'DUPLICATE_FILE_UPLOADED')\n self.assertRegex(\n response_payload['existing_content']['content_url'],\n '%s$' % reverse('content-detail', args=[first_content.pk])\n )\n self.assertRegex(\n response_payload['existing_content']['file_url'],\n '%s$' % first_content.content_file.url\n )", "def test_import_meter_usage_file_ignores_unknown_types_or_units(self):\n filename = \"example-pm-monthly-meter-usage-with-unknown-types-and-units.xlsx\"\n filepath = os.path.dirname(os.path.abspath(__file__)) + \"/data/\" + filename\n\n import_file_with_invalids = ImportFile.objects.create(\n import_record=self.import_record,\n source_type=SEED_DATA_SOURCES[PORTFOLIO_METER_USAGE][1],\n uploaded_filename=filename,\n file=SimpleUploadedFile(\n name=filename,\n content=pathlib.Path(filepath).read_bytes()\n ),\n cycle=self.cycle\n )\n\n url = reverse(\"api:v3:import_files-start-save-data\", args=[import_file_with_invalids.id])\n url += f'?organization_id={self.org.pk}'\n post_params = {\n 'cycle_id': self.cycle.pk,\n }\n self.client.post(url, post_params)\n\n self.assertEqual(3, Meter.objects.count())\n self.assertEqual(4, MeterReading.objects.count())\n\n refreshed_property_1 = Property.objects.get(pk=self.property_1.id)\n self.assertEqual(refreshed_property_1.meters.all().count(), 1)\n\n meter_1 = refreshed_property_1.meters.first()\n self.assertEqual(meter_1.meter_readings.all().count(), 2)\n\n refreshed_property_2 = Property.objects.get(pk=self.property_2.id)\n self.assertEqual(refreshed_property_2.meters.all().count(), 2)\n\n meter_2 = refreshed_property_2.meters.get(type=Meter.ELECTRICITY_GRID)\n self.assertEqual(meter_2.meter_readings.all().count(), 1)\n\n meter_3 = refreshed_property_2.meters.get(type=Meter.NATURAL_GAS)\n self.assertEqual(meter_3.meter_readings.all().count(), 1)", "def processTempLog(file_name):", "def check_duplicated_data(self, path, target):\n files_in_path = [file for file in self.get_csv_in_path(path)]\n print(\"check duplicated for file {} in path {} , files\".format(target, path))\n if target in files_in_path:\n print('The {} is already exist'.format(target))\n return True\n return False", "def test_var_not_set_same_logging_file(set_tempdir):\n tasks = run_n_simple_tasks(5)\n for task in tasks:\n log_path_matcher = LogPathCorrectnessMatcher(default_log_path(task[\"jobid\"]))\n log_path = UsedLogPath(task)\n assert log_path == log_path_matcher", "def test_02_not_exist(self):\n with self.assertLogs(\"borg\", \"WARNING\") as logger:\n self.api.extract(self.archive, self.file_3)\n message = logger.records[0].getMessage()\n self.assertRegex(\n message,\n r\".*?file_3.*never\",\n \"Warning not logged for bad path\",\n )", "def test_detect_duplicate_upload_items(duplicate_items: List[JSONDict]) -> None:\n\n with pytest.raises(ValidationError) as e:\n invalid_collection = UploadCollection(items=duplicate_items) # noqa: F841\n\n assert e.value.errors() == [\n {\n \"loc\": (\"items\",),\n \"msg\": \"Duplicate item guids detected: ['http://www.crimsonhexagon.com/post1']\",\n \"type\": \"value_error\",\n }\n ]", "def test_file_conflict(self):\n dir0, dir1 = self.make_temp_dirs(2)\n self.write_file(dir0, \"foo\")\n self.sync_all()\n\n self.write_file(dir0, \"foo\", \"bar\")\n time.sleep(0.1)\n self.write_file(dir1, \"foo\", \"baz\")\n 
self.sync_all()\n # File with later mtime wins\n self.assertFile(dir0, \"foo\", \"baz\")\n self.assertFile(dir1, \"foo\", \"baz\")", "def test_preexisting_custom_log_file(set_tempdir, mock_settings_env_vars):\n temp_dir = set_tempdir\n custom_log_path = Path(temp_dir) / \"main.log\"\n log_path_matcher = LogPathCorrectnessMatcher(custom_log_path)\n os.environ[LOG_ENV_VARIABLE_NAME] = str(custom_log_path)\n file_content = \"This existing file has content.\"\n with open(custom_log_path, \"a\") as f:\n f.write(file_content)\n\n tasks = run_n_simple_tasks(1)\n\n log_path = UsedLogPath(tasks[0])\n assert log_path == log_path_matcher\n\n with open(custom_log_path, \"r\") as f:\n log_content = f.read()\n assert file_content in log_content", "def test_upload_existing_empty_file(self):\n ps = PersistenceStore(s3_client=S3ExistingEmptyUpload())\n\n with pytest.raises(Exception) as e:\n ps.update({}, 'filename.json')\n\n assert str(e.value) == 'Unable to get the json data path: ' \\\n 'developer-analytics-audit-report/filename.json'", "def duplicate_timestamp_path(existing_path):\n logfile = parse.parse_filename(existing_path)\n index = 0\n while index < 25:\n if index == 0:\n suffix = ''\n else:\n suffix = '-%02d' % index\n\n new_path = parse.unparse_filename(\n (\n logfile.prefix +\n '-logjam-compress-duplicate-timestamp' +\n suffix\n ),\n logfile.timestamp,\n logfile.suffix,\n logfile.extension\n )\n if not os.path.exists(new_path):\n return new_path\n\n index += 1\n\n raise Exception('%d duplicate timestamp paths detected.' % index)", "def test_differ_times_one_file(generate_differ_times_one_file):\n fname = generate_differ_times_one_file\n with pytest.raises(Exception):\n process_files([fname])", "def test_rotateAlreadyExists(self):\n log = RiggedDailyLogFile(self.name, self.dir)\n self.addCleanup(log.close)\n\n # Build a new file with the same name as the file which would be created\n # if the log file is to be rotated.\n newFilePath = \"{}.{}\".format(log.path, log.suffix(log.lastDate))\n with open(newFilePath, \"w\") as fp:\n fp.write(\"123\")\n previousFile = log._file\n log.rotate()\n self.assertEqual(previousFile, log._file)", "def test_create4(self):\n TempfileManager.sequential_files(2)\n fname = TempfileManager.create_tempfile()\n OUTPUT = open(fname, 'w')\n OUTPUT.write('tempfile\\n')\n OUTPUT.close()\n self.assertEqual(len(list(glob.glob(tempdir + '*'))), 1)\n fname = os.path.basename(fname)\n self.assertEqual(fname, 'tmp2')\n #\n TempfileManager.unique_files()\n fname = TempfileManager.create_tempfile()\n OUTPUT = open(fname, 'w')\n OUTPUT.write('tempfile\\n')\n OUTPUT.close()\n self.assertEqual(len(list(glob.glob(tempdir + '*'))), 2)\n fname = os.path.basename(fname)\n self.assertNotEqual(fname, 'tmp3')\n self.assertTrue(fname.startswith('tmp'))", "def test_upload_no_overwrite(self):\n request = DummyRequest()\n request.access = DummyAccess(request)\n cache = DummyCache(request)\n request.access.allow_overwrite = []\n name, version, filename = \"a\", \"1\", \"a-1.tar.gz\"\n cache.upload(filename, BytesIO(b\"test1234\"), name, version)\n with self.assertRaises(ValueError):\n cache.upload(filename, BytesIO(b\"test1234\"), name, version)", "def test_log_filenames_multiple_no_date(self):\n now = datetime.datetime.now()\n (tracks, statuses) = self.app.log_filenames([self.track_path('silence.mp3')]*5)\n self.assertEqual(len(tracks), 5)\n self.assertEqual(self.get_track_count(), 5)\n track_objs = []\n for (idx, track) in enumerate(tracks):\n with self.subTest(idx=idx):\n track_obj = 
self.get_track_by_id(track.pk)\n track_objs.append(track_obj)\n self.assertLess(track_obj['timestamp'], now)\n if idx > 0:\n self.assertGreater(track_obj['timestamp'],\n track_objs[idx-1]['timestamp'])" ]
[ "0.67010015", "0.638992", "0.6376859", "0.6261966", "0.624463", "0.62304544", "0.62001187", "0.6198904", "0.6182989", "0.61653274", "0.6137007", "0.6109847", "0.6075311", "0.60572684", "0.6047644", "0.60395145", "0.60256505", "0.6012684", "0.5980237", "0.595728", "0.5917336", "0.59142685", "0.59142464", "0.591057", "0.5898938", "0.587316", "0.58493876", "0.5838887", "0.5819111", "0.58135176" ]
0.7041636
0
Computes the value of an entry by running its task. Requires that all the task's dependencies are already computed.
def compute(self, context): # TODO There are a few cases here where we acccess private members on # self.state; should we clean this up? state = self.state task = state.task protocol = state.desc_metadata.protocol assert state.is_initialized assert not state.is_cached assert task is not None, (state.task_key, self.level) dep_results = [] for dep_entry, dep_key in zip(self.dep_entries, task.dep_keys): assert dep_entry._is_cached dep_result = dep_entry.get_cached_result(context) dep_results.append(dep_result) if not task.is_simple_lookup: context.task_key_logger.log_computing(state.task_key) dep_values = [dep_result.value for dep_result in dep_results] # If we have any missing outputs, exit early with a missing result. if state.output_would_be_missing(): result = Result( task_key=task.key, value=None, local_artifact=None, value_is_missing=True, ) value_hash = "" # TODO Should we do this even when memoization is disabled? state._result = result if state.should_persist: state._result_value_hash = value_hash return result else: # If we have no missing outputs, we should not be consuming any missing # inputs either. assert not any( dep_key.case_key.has_missing_values for dep_key in task.dep_keys ) value = task.compute(dep_values) if task.is_simple_lookup: context.task_key_logger.log_accessed_from_definition(state.task_key) else: context.task_key_logger.log_computed(state.task_key) protocol.validate_for_dnode(task.key.dnode, value) result = Result( task_key=task.key, value=value, local_artifact=None, ) if state.should_persist: artifact = state._local_artifact_from_value(result.value, context) state._cache_accessor.save_local_artifact(artifact) state._result_value_hash = artifact.content_hash # If we're not persisting the result, this is our only chance to memoize it; # otherwise, we can memoize it later if/when we load it from get_cached_result. # (It's important to memoize the value we loaded, not the one we just computed, # because they may be subtly different and we want all downstream tasks to get # exactly the same value.) elif state.should_memoize: state._result = result else: self.context.temp_result_cache.save(result)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _compute(self, task_key_logger):\n\n task = self.task\n\n dep_results = [\n dep_state.get_results_assuming_complete(task_key_logger)[\n dep_key.dnode.to_entity_name()\n ]\n for dep_state, dep_key in zip(self.dep_states, task.dep_keys)\n ]\n\n provider = self.provider\n\n if not task.is_simple_lookup:\n for task_key in task.keys:\n task_key_logger.log_computing(task_key)\n\n dep_values = [dep_result.value for dep_result in dep_results]\n\n values = task.compute(dep_values)\n assert len(values) == len(provider.attrs.names)\n\n for query in self._queries:\n if task.is_simple_lookup:\n task_key_logger.log_accessed_from_definition(query.task_key)\n else:\n task_key_logger.log_computed(query.task_key)\n\n results_by_name = {}\n result_value_hashes_by_name = {}\n for ix, (query, value) in enumerate(zip(self._queries, values)):\n query.protocol.validate(value)\n\n result = Result(query=query, value=value,)\n\n if provider.attrs.should_persist():\n accessor = self._cache_accessors[ix]\n accessor.save_result(result)\n\n value_hash = accessor.load_result_value_hash()\n result_value_hashes_by_name[query.dnode.to_entity_name()] = value_hash\n\n results_by_name[query.dnode.to_entity_name()] = result\n\n # Memoize results at this point only if results should not persist.\n # Otherwise, load it lazily later so that if the serialized/deserialized\n # value is not exactly the same as the original, we still\n # always return the same value.\n if provider.attrs.should_memoize() and not provider.attrs.should_persist():\n self._results_by_name = results_by_name\n\n # But we cache the hashed values eagerly since they are cheap to load.\n if provider.attrs.should_persist():\n self._result_value_hashes_by_name = result_value_hashes_by_name", "def __call__(self, task):\n self.put(task)\n return self.get()", "def task(self, value):\n git_url = value['given']['git_url']\n\n repo_url_SQL = s.sql.text(\"\"\"\n SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'\n \"\"\".format(git_url))\n rs = pd.read_sql(repo_url_SQL, self.db, params={})\n\n try:\n repo_id = int(rs.iloc[0]['repo_id'])\n if value['job_type'] == \"UPDATE\":\n self._queue.put(CollectorTask(message_type='TASK', entry_info={\"task\": value, \"repo_id\": repo_id}))\n elif value['job_type'] == \"MAINTAIN\":\n self._maintain_queue.put(CollectorTask(message_type='TASK', entry_info={\"task\": value, \"repo_id\": repo_id}))\n if 'focused_task' in value:\n if value['focused_task'] == 1:\n self.finishing_task = True\n\n except Exception as e:\n logger.error(f\"error: {e}, or that repo is not in our database: {value}\")\n\n self._task = CollectorTask(message_type='TASK', entry_info={\"task\": value})\n self.run()", "def task(self, value):\n github_url = value['given']['github_url']\n\n repo_url_SQL = s.sql.text(\"\"\"\n SELECT min(repo_id) as repo_id FROM repo WHERE repo_git = '{}'\n \"\"\".format(github_url))\n rs = pd.read_sql(repo_url_SQL, self.db, params={})\n\n try:\n repo_id = int(rs.iloc[0]['repo_id'])\n if value['job_type'] == \"UPDATE\" or value['job_type'] == \"MAINTAIN\":\n self._queue.put(value)\n if 'focused_task' in value:\n if value['focused_task'] == 1:\n self.finishing_task = True\n\n except Exception as e:\n logging.error(f\"error: {e}, or that repo is not in our database: {value}\\n\")\n\n self._task = value\n self.run()", "def _completed_callback(\n self, value: VT, entry: \"CacheEntry[KT, VT]\", key: KT\n ) -> VT:\n # We check if the current entry matches the entry associated with the\n # deferred. 
If they don't match then it got invalidated.\n current_entry = self._pending_deferred_cache.pop(key, None)\n if current_entry is not entry:\n if current_entry:\n self._pending_deferred_cache[key] = current_entry\n return value\n\n self.cache.set(key, value, entry.get_invalidation_callbacks(key))\n\n return value", "def run(inputs):\n logger.debug(\"Running job %s\" % tick)\n \n #start = time.clock()\n start = datetime.datetime.now()\n try:\n result = task.evaluate(inputs)\n except:\n result = failure.Failure()\n finally:\n #end = time.clock()\n end = datetime.datetime.now()\n \n logger.debug(\"Running job %s finished\" % tick)\n \n #duration = end - start\n duration = (end - start).total_seconds()\n return traverser.EvalResult(result, duration)", "def run_operation(task):\n return task.run()", "def retrieve_task(self, task_key, version):\n\n d = Deferred()\n\n pkg_name = task_key[:task_key.find('.')]\n needs_update = False\n with self._lock:\n\n # get the task. if configured for lazy init, this class will only\n # attempt to load a task into the registry once it is requested.\n # subsequent requests will pull from the registry.\n pkg = self.registry.get( (pkg_name, version), None)\n if not pkg and self.lazy_init:\n logger.debug('Lazy Init: %s' % pkg_name)\n pkg = self.init_package(pkg_name, version)\n\n if pkg:\n pkg_status = pkg.status\n if pkg_status == packaging.STATUS_OUTDATED:\n # package has already entered a sync process;\n # append the callback\n self._task_callbacks[pkg_name].append((task_key, d))\n task_class = pkg.tasks.get(task_key, None)\n\n if task_class and (version is None or pkg.version == version):\n module_path, cycle = self._compute_module_search_path(\n pkg_name)\n if cycle:\n d.errback(\n (task_key, pkg.version, 'Cycle detected in dependency'))\n else:\n d.callback((task_key, version, task_class,\n module_path))\n else:\n # needs update\n pkg.status = packaging.STATUS_OUTDATED\n needs_update = True\n else:\n # no local package contains the task with the specified\n # version, but this does NOT mean it is an error -\n # try synchronizing tasks first\n needs_update = True\n\n if needs_update:\n self.emit('TASK_OUTDATED', pkg_name, version)\n self._task_callbacks[pkg_name].append((task_key, d))\n\n return d", "def do_one_task(entry, channel):\n if channel not in [\"mu\", \"ele\"]:\n raise RuntimeError(\"channel arg must be mu or ele\")\n\n output_file = os.path.join(OUTPUT_DIR, entry.outname + \"_%s.root\" % channel)\n\n if os.path.isfile(output_file):\n print \"! 
Output file already exists - skipping this task\"\n return\n\n crab_dir = \"crab_%s_%s_my_feature\" % (entry.taskname, channel)\n\n status_dict = get_job_status(crab_dir)\n print status_dict\n # if not status_dict['finished']:\n # print \"crab jobs not finished - skipping\"\n # return\n\n sample_dir = entry.dataset.split(\"/\")[1]\n date_str = status_dict['task_name'].split(\":\")[0]\n input_str = os.path.join(NAF_DIR, sample_dir, crab_dir, date_str, \"0000\", \"tree_%s_*.root\" % channel)\n\n # actually do the hadding\n if RUN_ON_BIRD:\n qsub_command = \"\"\"qsub -N %s -v OUTPUTF=\"%s\",INPUTF=\"%s\" qsub_hadd.sh\"\"\" % (entry.taskname, output_file, input_str)\n # print qsub_command # Uncomment this line when testing to view the qsub command\n subprocess.check_call(qsub_command, shell=True)\n else:\n hadd_cmd = \"hadd %s %s\" % (output_file, input_str)\n print hadd_cmd\n subprocess.check_output(hadd_cmd, shell=True) # need shell=True for wildcard expansion?", "def get_result(self, task: Task):\n # Basic bookkeeping\n assert task.chunk_i in self.pending_is\n self.pending_is.discard(task.chunk_i)\n self._highest_done_i = max(self._highest_done_i, task.chunk_i)\n if self.highest_continuous_done_i == self.final_task_i:\n self.all_results_arrived = True\n\n # Fetch result\n if not task.future.done():\n raise RuntimeError(\"get_result called before task was done\")\n result = task.future.result() # Will raise if exception\n\n # Record new inputs\n if self.changing_inputs:\n if task.is_final:\n new_inputs = {}\n else:\n assert isinstance(result, tuple) and len(result) == 2, \\\n f\"{self} changes inputs but returned a {type(result)} \" \\\n f\"rather than a two-tuple\"\n result, new_inputs = result\n self.wants_input = {dt: self.seen_input[dt] + 1\n for dt in new_inputs}\n\n # Check and return result\n if result is None:\n assert task.is_final, f\"{task} is not final but returned None\"\n else:\n self._validate_results(task, result)\n return result", "def task_calc():\n return 'What is the result of the expression?'", "def compute(self, node, input_vals):\n assert False, \"placeholder values provided by feed_dict\"", "def run_task(self) -> Task:", "def compute(self):\n try:\n self.set_trackline()\n except:\n app.logger.warning(\"Could not process trackline results. 
URL may be invalid?\")\n\n if Job.exists(self.task_id, connection=redis_connection):\n job = Job.fetch(self.task_id, connection=redis_connection)\n self.task_result = unicode(job.meta.get(\"outcome\", \"\"))\n\n self.save()", "def evaluate(self, tick, task, inputs, nosend_ports=None, fail_on_unexpected_nosend=False):\n\n logger.debug(\"Transfers for job %s\" % tick)\n\n ports = []\n transfers = []\n transfer_results = {}\n for port, (valueid, worker) in inputs.iteritems():\n \n \n d = self.fetch_from(worker, valueid)\n \n def transfer_completed(transfer_result, valueid, port):\n if transfer_result: # `None` if the value was already present\n transfer_results[port] = transfer_result\n return self.get_value(valueid)\n \n\n d.addCallback(transfer_completed, valueid, port)\n ports.append(port)\n transfers.append(d)\n \n d = defer.DeferredList(transfers)\n \n def run(inputs):\n \"\"\"\n Runs in separate thread.\n \"\"\"\n logger.debug(\"Running job %s\" % tick)\n \n #start = time.clock()\n start = datetime.datetime.now()\n try:\n result = task.evaluate(inputs)\n except:\n result = failure.Failure()\n finally:\n #end = time.clock()\n end = datetime.datetime.now()\n \n logger.debug(\"Running job %s finished\" % tick)\n \n #duration = end - start\n duration = (end - start).total_seconds()\n return traverser.EvalResult(result, duration)\n \n @twistit.yieldefer\n def got_all(results):\n \n logger.debug(\"Transfers for job %s finished\" % tick)\n \n values = []\n for success, result in results:\n if not success:\n if result.check(pickle.PickleError):\n raise pickle.PickleError(\"Failed to unpickle input of %r.%r: %s\" %(tick, port, result))\n else:\n result.raiseException()\n else:\n values.append(result)\n\n inputs = dict(zip(ports, values))\n \n evalresult = yield threads.deferToThread(run, inputs)\n \n if not isinstance(evalresult.result, dict) and not isinstance(evalresult.result, failure.Failure):\n raise ValueError(\"Evaluation of task %r did not produce a dict or a failure. Got %r.\" % (task, evalresult.result))\n \n defer.returnValue(evalresult)\n \n def task_completed(evalresult):\n if isinstance(evalresult.result, dict):\n \n # Injest values into our store and replace the eval results with ValueIds.\n outputs = evalresult.result\n outs = {}\n datasizes = {}\n for port, value in outputs.iteritems():\n valueid = ValueId(graph.Endpoint(tick, port))\n \n pickle_supported = True\n if nosend_ports and port in nosend_ports:\n pickle_supported = False\n \n try:\n size = self.set_value(valueid, \n value, \n pickle_supported, \n pickle_supported and fail_on_unexpected_nosend)\n except NoPickleError as e:\n e = NoPickleError(\"Value of output port %r cannot be pickled.\" % port,\n cause=e.cause)\n # TODO: memory leak. 
We should remove the values we've set in\n # previous loop iterations.\n raise e\n \n outs[port] = valueid\n if size is not None:\n datasizes[port] = size \n \n evalresult.result = outs\n evalresult.datasizes = datasizes\n evalresult.transfer_results = transfer_results\n return evalresult\n \n d.addCallback(got_all)\n d.addCallback(task_completed)\n return d", "def compute_value(self, *args, **kwargs):\n\n return None", "def task(self, name):\n with self.db_lock:\n return self.rcon.hget(self.task_key, name)", "def evaluate_one_task(prediction_file, label_file, task, language=None):\n predictions = READER_FUNCTION[task](prediction_file)\n labels = READER_FUNCTION[task](label_file)\n if task not in ['bucc2018', 'mlqa', 'tydiqa', 'xquad']:\n assert len(predictions) == len(labels), 'Number of examples in {} and {} not matched in {} task'.format(prediction_file, label_file, task)\n result = METRIC_FUNCTION[task](labels, predictions, language)\n return result", "def solve(task: str) -> int:\n numbers = process_data(task)\n first, second = find_2020_summands(numbers)\n return first * second", "def compute(self) -> Any:\n self._check_for_increment(\"compute\")\n return self[-1].compute()", "def value(self):\n self.wait(0)\n try:\n value = self._value\n except AttributeError:\n raise AttributeError('Deferred value not available')\n if isinstance(value, TaskFailure):\n raise value\n return value", "def eval_task(index, child_conns, parent_conns, shared_data, task, type):\n if type == 'GPU':\n set_gpu_device(index)\n if task.do_redirect:\n sys.stdin = file(os.devnull)\n sys.stdout = file(os.devnull)\n if task.do_redirect is None and os.name == 'posix':\n log_warn(\"WARNING: specify do_redirect=True if CUDA code is not\\\n compiling. see \\\n <http://playdoh.googlecode.com/svn/docs/playdoh.html#gpu>\")\n log_info(\"Evaluating task on %s #%d\" % (type, index + 1))\n # shared data: if there is shared data, pass it in the task's kwds\n # task fun must have fun(..., shared_data={})\n if len(shared_data) > 0:\n task.kwds['shared_data'] = shared_data\n result = task.fun(*task.args, **task.kwds)\n# log_debug(\"Task successfully evaluated on %s #%d...\" % (type, index))\n if type == 'GPU':\n# set_gpu_device(0)\n close_cuda() # ensures that the context specific to the process is\n # closed at the process termination\n child_conns[index].send(result)", "def __call__(self, *args, **kwargs):\n key = None\n value = None\n memoization_key = None\n\n if self._memoize:\n memoization_key = self._get_memoization_key(*args, **kwargs)\n if memoization_key in self._cached_results:\n return self._cached_results[memoization_key]\n\n if self._cache:\n key = self.get_cache_key(*args, **kwargs)\n value = cache_backend.get(key)\n\n if value is None:\n value = self._fn(*self._inject_obj(args), **kwargs)\n\n if self._cache:\n cache_backend.set(key, value, timeout=self._timeout)\n\n if self._memoize:\n self._cached_results[memoization_key] = value\n\n return value", "def run_main_task(entry_id, task_fcn, action_name):\r\n\r\n # get the InstructorTask to be updated. 
If this fails, then let the exception return to Celery.\r\n # There's no point in catching it here.\r\n entry = InstructorTask.objects.get(pk=entry_id)\r\n\r\n # get inputs to use in this task from the entry:\r\n task_id = entry.task_id\r\n course_id = entry.course_id\r\n task_input = json.loads(entry.task_input)\r\n\r\n # construct log message:\r\n fmt = u'task \"{task_id}\": course \"{course_id}\" input \"{task_input}\"'\r\n task_info_string = fmt.format(task_id=task_id, course_id=course_id, task_input=task_input)\r\n\r\n TASK_LOG.info('Starting update (nothing %s yet): %s', action_name, task_info_string)\r\n\r\n # Check that the task_id submitted in the InstructorTask matches the current task\r\n # that is running.\r\n request_task_id = _get_current_task().request.id\r\n if task_id != request_task_id:\r\n fmt = u'Requested task did not match actual task \"{actual_id}\": {task_info}'\r\n message = fmt.format(actual_id=request_task_id, task_info=task_info_string)\r\n TASK_LOG.error(message)\r\n raise ValueError(message)\r\n\r\n # Now do the work:\r\n with dog_stats_api.timer('instructor_tasks.time.overall', tags=['action:{name}'.format(name=action_name)]):\r\n task_progress = task_fcn(entry_id, course_id, task_input, action_name)\r\n\r\n # Release any queries that the connection has been hanging onto:\r\n reset_queries()\r\n\r\n # log and exit, returning task_progress info as task result:\r\n TASK_LOG.info('Finishing %s: final: %s', task_info_string, task_progress)\r\n return task_progress", "def execute(self):\n if len(self._tasks) == 0:\n if self.allow_empty:\n LOG.info(_(\"WrapperTask %s has no Subtasks; no-op execution.\"),\n self.name)\n return None\n raise ex.WrapperTaskNoSubtasks(name=self.name)\n\n @entry_transaction\n def _execute(wrapper):\n update_needed = False\n for task in self._tasks:\n kwargs = task.save_kwargs\n if ('provided' in reflection.get_callable_args(task.execute)\n or reflection.accepts_kwargs(task.execute)):\n kwargs['provided'] = self.provided\n ret = task.execute(wrapper, *task.save_args, **kwargs)\n if task.flag_update and ret:\n update_needed = True\n if task.provides is not None:\n self.provided[task.provides] = ret\n if update_needed:\n wrapper = wrapper.update(timeout=self.update_timeout)\n return wrapper\n # Use the wrapper if already fetched, or the getter if not\n # NOTE: This assignment must remain atomic. See TAG_WRAPPER_SYNC.\n self._wrapper = _execute(self._wrapper or self._getter)\n return self._wrapper, self.provided", "def task():", "def run(self, run_number):\n return self[self.run_cache[run_number]]", "def execute(self):\n # Ensure a true no-op (in particular, we don't want to GET the feed) if\n # there are no Subtasks\n if not any([self._tx_by_uuid, self._common_tx.subtasks,\n self._post_exec]):\n LOG.info(_(\"FeedTask %s has no Subtasks; no-op execution.\"),\n self.name)\n return\n rets = {'wrapper_task_rets': {}}\n try:\n # Calling .wrapper_tasks will cause the feed to be fetched and\n # WrapperTasks to be replicated, if not already done. 
Only do this\n # if there exists at least one WrapperTask with Subtasks.\n # (NB: It is legal to have a FeedTask that *only* has post-execs.)\n if self._tx_by_uuid or self._common_tx.subtasks:\n pflow = tf_uf.Flow(\"%s_parallel_flow\" % self.name)\n pflow.add(*self.wrapper_tasks.values())\n # Execute the parallel flow now so the results can be provided\n # to any post-execs.\n rets['wrapper_task_rets'] = self._process_subtask_rets(\n tf_eng.run(\n pflow, engine='parallel',\n executor=ContextThreadPoolExecutor(self.max_workers)))\n if self._post_exec:\n flow = tf_lf.Flow('%s_post_execs' % self.name)\n flow.add(*self._post_exec)\n eng = tf_eng.load(flow, store=rets)\n eng.run()\n rets = eng.storage.fetch_all()\n except tf_ex.WrappedFailure as wfail:\n LOG.error(_(\"FeedTask %s experienced multiple exceptions. They \"\n \"are logged individually below.\"), self.name)\n for fail in wfail:\n LOG.exception(fail.pformat(fail.traceback_str))\n raise ex.MultipleExceptionsInFeedTask(self.name, wfail)\n\n # Let a non-wrapped exception (which happens if there's only one\n # element in the feed) bubble up as-is.\n\n return rets", "def task_wrapper(\n self, key: str, task: Task, executor: \"TaskGraphExecutor\"\n ) -> Callable[[Task], Task]:", "def _reduce_row(self, entry):\n # Identify the runs to be used for reduction\n runs = run_list(entry, \"refl\")\n directs = run_list(entry, \"directs\")\n\n if self.verbose:\n fmt = \"Reducing %s [%s]/[%s]\"\n\n print(\n fmt\n % (\n entry[\"name\"],\n \", \".join(\"%d\" % r for r in runs),\n \", \".join(\"%d\" % r for r in directs),\n )\n )\n sys.stdout.flush() # keep progress updated\n\n if not runs:\n warnings.warn(\n \"Row %d (%s) has no reflection runs. Skipped.\"\n % (entry[\"source\"], entry[\"name\"])\n )\n return None, None\n if not directs:\n warnings.warn(\n \"Row %d (%s) has no direct beam runs. Skipped.\"\n % (entry[\"source\"], entry[\"name\"])\n )\n return None, None\n\n if len(runs) > len(directs):\n warnings.warn(\n \"Row %d (%s) has differing numbers of\"\n \" direct & reflection runs. Skipped.\"\n % (entry[\"source\"], entry[\"name\"])\n )\n return None, None\n\n ds, fname = reduce_stitch(\n runs,\n directs,\n trim_trailing=self.trim_trailing,\n data_folder=self.data_folder,\n reduction_options=self.reduction_options,\n prefix=self.prefix,\n )\n\n return ds, fname" ]
[ "0.63904375", "0.59485996", "0.57995343", "0.5572648", "0.5520435", "0.55173826", "0.54368013", "0.53980225", "0.53946394", "0.53903", "0.53770065", "0.5371793", "0.53237903", "0.53095233", "0.52732223", "0.5251898", "0.52199835", "0.52119595", "0.5201217", "0.51943415", "0.5193258", "0.51785046", "0.51772386", "0.5162238", "0.51589197", "0.5145932", "0.51398903", "0.5124261", "0.5120207", "0.51152563" ]
0.7053696
0
Indicates whether the task state's result is cached.
def is_cached(self):
    if self.should_persist:
        # If our value is persistable, it can be saved either on disk or in memory,
        # but only the former counts as being officially "cached".
        return self._result_value_hash is not None
    else:
        return self._result is not None
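To make the rule above concrete, here is a minimal, self-contained sketch; the `_TaskStateSketch` class and its constructor arguments are invented for this illustration and are not the library's actual task-state API.

class _TaskStateSketch:
    """Simplified stand-in for a task state; not the real class."""

    def __init__(self, should_persist, result=None, result_value_hash=None):
        self.should_persist = should_persist
        self._result = result
        self._result_value_hash = result_value_hash

    @property
    def is_cached(self):
        if self.should_persist:
            # A persistable value only counts as "cached" once its hash is known.
            return self._result_value_hash is not None
        else:
            return self._result is not None


# A persisted state is not "cached" until a value hash has been loaded or computed.
assert not _TaskStateSketch(should_persist=True, result=42).is_cached
assert _TaskStateSketch(should_persist=True, result_value_hash="abc123").is_cached
# A non-persisted state counts as cached as soon as it holds an in-memory result.
assert _TaskStateSketch(should_persist=False, result=42).is_cached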
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_cached(self):\n return False", "def is_cached(name, typ=\"pkl\"):\n return os.path.exists(cache_name(name, typ))", "def _may_cache(self, request, response=None):\n # any successful request may be cached\n return ((HTTPStatus.OK <= response.status_code < HTTPStatus.BAD_REQUEST)\n if response else True)", "def has_cache(self):\n try:\n cache = self.get_cache()\n return True\n except (KeyError, AttributeError):\n return False", "def cache_hit(self):\n return self._properties.get(\"cacheHit\")", "def getCacheable(self):\n return False", "def _isCached (self):\n\t\tself.props['ncjobids'] = range(self.length)\n\t\tif self.cache == False:\n\t\t\tself.log ('Not cached, because proc.cache is False', 'debug')\n\t\t\treturn False\n\t\t\n\t\tif self.cache == True:\n\t\t\tfor depend in self.depends:\n\t\t\t\tif depend.cached: continue\n\t\t\t\tself.log ('Not cached, my dependent \"%s\" is not cached.' % depend._name(), 'debug')\n\t\t\t\treturn False\n\t\t\n\t\ttrulyCachedJids = []\n\t\texptCachedJids = []\n\t\tself.props['ncjobids'] = []\n\t\tfor i, job in enumerate(self.jobs):\n\t\t\tjob = self.jobs[i]\n\t\t\tif job.isTrulyCached ():\n\t\t\t\ttrulyCachedJids.append(i)\n\t\t\telif job.isExptCached ():\n\t\t\t\texptCachedJids.append (i)\n\t\t\telse:\n\t\t\t\tself.props['ncjobids'].append (i)\n\t\t\t\t\n\t\tself.log ('Truely cached jobs: %s' % (trulyCachedJids if len(trulyCachedJids) < self.length else 'ALL'), 'debug')\n\t\tself.log ('Export cached jobs: %s' % (exptCachedJids if len(exptCachedJids) < self.length else 'ALL'), 'debug')\n\t\t\n\t\tif self.ncjobids:\n\t\t\tif len(self.ncjobids) < self.length:\n\t\t\t\tself.log ('Partly cached, only run non-cached %s job(s).' % len(self.ncjobids), 'info')\n\t\t\t\tself.log ('Jobs to be running: %s' % self.ncjobids, 'debug')\n\t\t\telse:\n\t\t\t\tself.log ('Not cached, none of the jobs are cached.', 'info')\n\t\t\treturn False\n\t\telse:\n\t\t\tself.log (self.workdir, 'info', 'CACHED')\n\t\t\treturn True", "def cached(self, key):\n return key in self._cache", "def __cached(self):\n # already cached stuff\n if self._cached is None:\n self._cached = Cached(self.resource)\n return self._cached", "def is_locked(self):\n return cache.get(self.id)", "def incache(self, query):\n key = genkey(query)\n res = self.memcache.get(key)\n if res and type(res) is int:\n return True\n return False", "def required_cache(self):\n return self._required_cache", "def _check_cache(self):\n return os.path.exists(self._cache_key)", "def cache_is_valid():\n if ARGV.get(NOCACHE_OPT):\n return False\n if not CACHE['last-request'] \\\n or not CACHE['max-age'] \\\n or not CACHE['feed']:\n return False\n current_time = float(time.time())\n last_request = float(CACHE['last-request'])\n max_age = float(CACHE['max-age'])\n return bool(current_time - last_request < max_age)", "def is_remote_cached(cls, target_filename):\n is_cached = None\n cache = cls.CACHE_BACKEND()\n for file_name, file_id in cache.search():\n if file_name == os.path.basename(target_filename):\n is_cached = file_id\n logger.debug('File %r already cached at %r', target_filename, cls.CACHE_BACKEND)\n break\n return is_cached", "def is_hash_locally_cached(self, ipfs_hash: str, ipfs_refs_local=None) -> bool:\n output = run([\"ipfs\", \"files\", \"stat\", \"--with-local\", \"--size\", f\"/ipfs/{ipfs_hash}\"])\n if \"(100.00%)\" in output:\n log(\"already fully cached\", \"green\")\n log(output)\n return True\n else:\n log(\"not fully cached\", \"red\")\n log(output)\n return False", "def needs_update(self, 
cache_key):\r\n return self._read_sha(cache_key) != cache_key.hash", "def cached(self, args) -> bool:\n return all([art.built for art in self.artifacts])", "def isCached(filename, hash):\r\n path = cachePath(filename)\r\n if not os.path.exists(path):\r\n return False\r\n \r\n return hash == hashlib.sha1(open(path, 'rb').read()).hexdigest()", "def is_sim_layer_cached(self):\n layers = [self._wrap_ns(self.setup_config[\"sim_layer\"])]\n input_nodes = self.get_layer_nodes_info(layers)\n return self.is_node_cached(input_nodes.values()[0])", "def __cached_scrape_available(song_name, artist):\n cache_path = __cache_path(song_name, artist)\n\n return op.exists(cache_path)", "def _use_temp_cache(self):\n # If full tensors need to be stored tf.variables, then do not use temp\n # variables to store them.\n if self._use_tensor_buffer():\n return False\n if self._use_tensor_values_cache():\n return self._parameters.use_temp_cache_var\n else:\n # Temporary caches only replaces tf.Variables caches. If no cache is used\n # return False.\n return False", "def should_reset(self):\n # type: () -> bool\n if not self._is_cache_enabled():\n return False\n elapsed = time.time() - self._last_ts\n return elapsed > self._refresh_interval_sec", "def is_safe_cache(self):\n if self.get_last_update() > self.timestamp:\n return False\n return True", "def _cache_has(self, metric_name):\n pass", "def _result_already_returned(self):\n return self.deferred.called", "def check_artifact_cache(self, vts):\r\n return self.do_check_artifact_cache(vts)", "def cache_result(func):\n\n @wraps(func)\n def with_cache(*args, **kwargs):\n \"\"\"\n Cached function\n \"\"\"\n key = '{}{}{}'.format(\n hash(func), hash(args), hash(frozenset(kwargs.items())))\n\n cached_result = cache.get(key)\n if cached_result is not None:\n return cached_result if cached_result != 'None' else None\n result = func(*args, **kwargs)\n cache.set(key, result if result is not None else 'None')\n\n return result\n\n return with_cache", "def enable_caching_acts_data() -> bool:\n return True", "def _cache_has(self, metric_name):\n with self._lock:\n return metric_name in self.__cache" ]
[ "0.74749434", "0.68324715", "0.68149173", "0.6738329", "0.6655201", "0.66187906", "0.65679663", "0.6416865", "0.6415875", "0.6382452", "0.6292968", "0.62685287", "0.62240446", "0.6211946", "0.6159335", "0.61113095", "0.60873556", "0.60792947", "0.6072414", "0.6051632", "0.6046242", "0.60414934", "0.60294247", "0.6002581", "0.5999916", "0.59835416", "0.5974548", "0.59718305", "0.5963789", "0.5957003" ]
0.79039913
0
Loads the hash of the persisted value for this task, if it exists. If the persisted value is available in the cache, this object's `is_cached` property will become True. Otherwise, nothing will happen.
def attempt_to_access_persistent_cached_value(self):
    assert self.is_initialized
    assert not self.is_cached

    if not self.should_persist:
        return
    if not self._cache_accessor.can_load():
        return

    self._load_value_hash()
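A small runnable sketch of the same guard structure, using a hypothetical in-memory accessor in place of the real cache accessor; the names below are assumptions made for the example.

class _FakeCacheAccessor:
    """Hypothetical in-memory accessor used only for this illustration."""

    def __init__(self, stored_hash=None):
        self._stored_hash = stored_hash

    def can_load(self):
        return self._stored_hash is not None

    def load_value_hash(self):
        return self._stored_hash


def attempt_to_access_cached_value(should_persist, accessor):
    # Mirror the guards above: only persisted values with a loadable cache
    # entry produce a hash; every other case is a silent no-op.
    if not should_persist:
        return None
    if not accessor.can_load():
        return None
    return accessor.load_value_hash()


assert attempt_to_access_cached_value(True, _FakeCacheAccessor("abc123")) == "abc123"
assert attempt_to_access_cached_value(True, _FakeCacheAccessor()) is None
assert attempt_to_access_cached_value(False, _FakeCacheAccessor("abc123")) is None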
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_value_hash(self):\n\n artifact = self._cache_accessor.load_artifact()\n if artifact is None or artifact.content_hash is None:\n raise AssertionError(\n oneline(\n f\"\"\"\n Failed to load cached value (hash) for descriptor\n {self._cache_accessor.provenance.descriptor!r}.\n This suggests we did not successfully compute the task\n in a subprocess, or the entity wasn't cached;\n this should be impossible!\"\"\"\n )\n )\n self._result_value_hash = artifact.content_hash", "def is_cached(self):\n if self.should_persist:\n # If our value is persistable, it can be saved either on disk or in memory,\n # but only the former counts as being officially \"cached\".\n return self._result_value_hash is not None\n else:\n return self._result is not None", "def _get_cached_value(self, value):\n\n if self._refreshable is True and self.is_expired is False:\n self.refresh()\n\n return deepcopy(value)", "def refresh_all_persistent_cache_state(self, context):\n\n # If this task state is not initialized or not persisted, there's nothing to\n # refresh.\n if not self.is_initialized or not self.should_persist:\n return\n\n self.refresh_cache_accessor(context)\n\n # If we haven't loaded anything from the cache, we can stop here.\n if self._result_value_hash is None:\n return\n\n # Otherwise, let's update our value hash from the cache.\n if self._cache_accessor.can_load():\n self._load_value_hash()\n else:\n self._result_value_hash = None", "def _get_cached_value(self, value):\n\n return value", "def load(self,hash,key,default=None):\n # FIXME: it would be cool if load starts with a copy of the hash\n # and clears off entries as recieved, such that we can tell if any\n # entries are not loaded. This should result in a warning in the return\n # object.\n assert hash is not None, \"hash is None\"\n assert key is not None, \"key is None\"\n if hash.has_key(key):\n return hash[key]\n else:\n return default", "def sync_after_remote_computation(self):\n\n # If this state was never initialized, it doesn't have any out-of-date\n # information, so there's no need to update anything.\n if not self.is_initialized:\n return\n\n assert self.should_persist\n\n # First, let's flush the stored entries in our cache accessor. 
Since we just\n # computed this entry in a subprocess, there should be a new cache entry that\n # isn't reflected yet in our local accessor.\n # (We don't just call self.refresh_cache_accessors() because we don't\n # particularly want to do the cache versioning check -- it's a little late to\n # do anything if it fails now.)\n self._cache_accessor.flush_stored_entries()\n\n # Then, populate the value hashes.\n if self._result_value_hash is None:\n self._load_value_hash()", "def _cache_state(self, instance):\n if instance.pk:\n instance.__cache_data = dict((f, getattr(instance, f)) for f in self.cache_fields)\n else:\n instance.__cache_data = UNSAVED", "def get(self, key):\n if key in self.cache:\n return self.cache[key]\n valueat,valuelen = self.keys[key]\n valuedump = self.file.readp(valueat, valuelen)\n value = pickle.loads(valuedump)\n self.cache[key] = value\n return value", "def __cached(self):\n # already cached stuff\n if self._cached is None:\n self._cached = Cached(self.resource)\n return self._cached", "def _load(self):\n self.logger.debug(\"Loading from persistence\")\n # load whole item from persistence\n data = self._persistence.load(self.id(), default={})\n if not data:\n return\n\n try:\n self.persistence_deserialize(data)\n except NotImplementedError:\n # allow backwards compatibility or persisted_values way\n for persisted_var in self.persisted_values():\n if persisted_var in data:\n self.logger.debug(\"Loaded value {} for attribute {}\".format(\n data[persisted_var], persisted_var))\n # Set the loaded value to the attribute on this class\n setattr(self, persisted_var, data[persisted_var])\n except:\n # log exception while loading and let it continue\n self.logger.exception(\n \"Failed to deserialize block with data: {}\".format(data))", "def get(self, identifier):\n cache_file_path = self._get_cache_file_path(identifier)\n\n if os.path.isfile(cache_file_path):\n with open(cache_file_path, 'rb') as fp:\n result = pickle.load(fp)\n return result\n\n return None", "def load(self):\n result = bolt.PickleDict.load(self)\n if not result and self.oldPath.exists():\n ins = None\n try:\n ins = self.oldPath.open('r')\n self.data.update(compat.uncpickle(ins))\n ins.close()\n result = 1\n except EOFError:\n if ins: ins.close()\n #--Done\n return result", "def load_cache(self):\n self.mu.load(self.cached_mu)\n self.var.load(self.cached_var)\n self.count.load(self.cached_count)", "def __getstate__(self):\n with self.override_evaluator(None):\n loaded_from = self.__loaded_from\n try:\n self.__loaded_from = None\n return prepare_dict(self.__dict__)\n finally:\n self.__loaded_from = loaded_from", "def cache_get(self, key: str) -> Optional[bytes]:\n if self.cache is not None:\n return self.cache.get(key)\n return None", "def value(self) -> Any:\n if self._value_cached is not None:\n return self._value_cached\n self._value_cached = self._value()\n return self._value_cached", "def get(self, key):\n if key and key in self.cache_data:\n return self.cache_data[key]\n return None", "def load(self):\n return self._value", "def complete(self, task_key_logger):\n\n assert self._is_initialized\n assert not self.is_blocked\n assert not self.is_complete\n\n # See if we can load it from the cache.\n if self.provider.attrs.should_persist() and all(\n axr.can_load() for axr in self._cache_accessors\n ):\n # We only load the hashed result while completing task state\n # and lazily load the entire result when needed later.\n value_hashes_by_name = {}\n for accessor in self._cache_accessors:\n value_hash = 
accessor.load_result_value_hash()\n value_hashes_by_name[accessor.query.dnode.to_entity_name()] = value_hash\n\n self._result_value_hashes_by_name = value_hashes_by_name\n # If we cannot load it from cache, we compute the task state.\n else:\n self._compute(task_key_logger)\n\n self.is_complete = True", "def get(self, key, default=None):\n try:\n # get the value from the cache\n value = self._cache.get(self.prepare_key(key))\n if value is None:\n return default\n # pickle doesn't want a unicode!\n value = smart_str(value)\n # hydrate that pickle\n return pickle.loads(value)\n except Exception as err:\n return self.warn_or_error(err)", "def retrieve_from_cache(self, x, y):\n return False", "def set_cache(self, eval_hash: str, task_hash: str, args_hash: str, value: Any) -> None:\n self.backend.set_eval_cache(eval_hash, task_hash, args_hash, value, value_hash=None)", "def is_cached(self):\n return False", "def fetch(self, hash):\n return self.r.get(hash)", "def loaded(self):\n return self._loaded", "def get(self, key):\n if key and key in self.cache_data.keys():\n return self.cache_data[key]\n else:\n return None", "def get(self, key):\n if key and key in self.cache_data.keys():\n return self.cache_data[key]\n else:\n return None", "def maybe_refresh(self, name=None):\n now = time.time()\n if self.last_load is None or (now - self.last_load) > self.tfs.cache_validity:\n self.load(name)", "def memoization_get(self, key):\n memoization_prepare(self)\n return getattr(self, constants.CONST_MEMOIZATION, {}).get(key, None)" ]
[ "0.72252256", "0.63859934", "0.5972734", "0.5886148", "0.5879905", "0.5804419", "0.574814", "0.5677867", "0.5613093", "0.5568389", "0.5543914", "0.54435587", "0.54258996", "0.5423656", "0.5395598", "0.53953743", "0.5376388", "0.53689486", "0.53612226", "0.53522384", "0.5350868", "0.53448087", "0.5339524", "0.533585", "0.53132397", "0.52989894", "0.5256142", "0.5256142", "0.5247188", "0.5243595" ]
0.71849906
1
Refreshes all state that depends on the persistent cache. This is useful if the external cache state might have changed since we last worked with this task.
def refresh_all_persistent_cache_state(self, context):
    # If this task state is not initialized or not persisted, there's nothing to
    # refresh.
    if not self.is_initialized or not self.should_persist:
        return

    self.refresh_cache_accessor(context)

    # If we haven't loaded anything from the cache, we can stop here.
    if self._result_value_hash is None:
        return

    # Otherwise, let's update our value hash from the cache.
    if self._cache_accessor.can_load():
        self._load_value_hash()
    else:
        self._result_value_hash = None
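The refresh behavior can be illustrated with a dict standing in for the external persistent cache; the class below is a simplified sketch, not the library's implementation.

external_cache = {"my_task": "hash-v1"}  # stand-in for the persistent store


class _RefreshableStateSketch:
    def __init__(self, key):
        self.key = key
        self.result_value_hash = external_cache.get(key)

    def refresh(self):
        # Reload the hash if the entry still exists; otherwise drop the stale
        # in-memory hash so the value no longer looks cached.
        self.result_value_hash = external_cache.get(self.key)


state = _RefreshableStateSketch("my_task")
external_cache["my_task"] = "hash-v2"  # the cache changed behind our back
state.refresh()
assert state.result_value_hash == "hash-v2"

del external_cache["my_task"]  # the entry was evicted externally
state.refresh()
assert state.result_value_hash is None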
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def flush_local_cache(self):\n self._local_cache = {}", "def reload_cache(self):\n self.data = self.read_data_cache()", "def flush_cache(self):\n if self.cache_modified:\n self.cache_manager.write(self.cache_file, self.cache)", "def sync_after_remote_computation(self):\n\n # If this state was never initialized, it doesn't have any out-of-date\n # information, so there's no need to update anything.\n if not self.is_initialized:\n return\n\n assert self.should_persist\n\n # First, let's flush the stored entries in our cache accessor. Since we just\n # computed this entry in a subprocess, there should be a new cache entry that\n # isn't reflected yet in our local accessor.\n # (We don't just call self.refresh_cache_accessors() because we don't\n # particularly want to do the cache versioning check -- it's a little late to\n # do anything if it fails now.)\n self._cache_accessor.flush_stored_entries()\n\n # Then, populate the value hashes.\n if self._result_value_hash is None:\n self._load_value_hash()", "def _invalidate_local_get_event_cache_all(self) -> None:\n self._get_event_cache.clear()\n self._event_ref.clear()\n self._current_event_fetches.clear()", "def flushCaches(self):\n self.rehabTreeCache = {} \n self.frailRehabTreeCache = {} \n self.frailTreeCache = {}", "def reset_cache(self):\n self._cache_complete = False\n self._cache = {}\n self._catcache = {}", "def refresh(self):\n self.fetch(False)", "def flush_caches(self):\n spotify.Error.maybe_raise(\n lib.sp_session_flush_caches(self._sp_session))", "def refresh_cache_accessor(self, context):\n\n self._cache_accessor = context.core.persistent_cache.get_accessor(\n task_key=self.task_key,\n provenance=self._provenance,\n )\n if context.core.versioning_policy.check_for_bytecode_errors:\n self._check_accessor_for_version_problems()", "def _flush_cached_by_key(cls, key, force=True):\n try:\n if force or cls.at_idmapper_flush():\n del cls.__dbclass__.__instance_cache__[key]\n else:\n cls._dbclass__.__instance_cache__[key].refresh_from_db()\n except KeyError:\n # No need to remove if cache doesn't contain it already\n pass", "def refresh(self):\n\n self._refreshed_on = time.time() * 1000", "def refresh(self):\n self.update_from_file()\n self.update_from_env()", "def recache_updates(self):\n ks = ['BooksUpdated', '%s'%self.key().id()]\n decaches(ks)", "def purge_cache(self):\n\n self.local_store.purge_cache()", "def reset_cache(self, force_reset=False):\n if force_reset:\n self.write_data_cache(self._empty_data())\n else:\n msg = 'All information about stored datasets will be lost if you proceed! 
' + \\\n 'Set \\'force_reset=True\\' to proceed with the reset of dbcollection.json.'\n warnings.warn(msg, UserWarning, stacklevel=2)", "def _refresh_cache(self, data_dict):\r\n pass", "def refresh(self):\n for budget in self.budgets:\n budget.refresh()\n self._budgets = None", "def sync_tree_cache(self) -> None:\n self.sync_tree_with_data(self.tree_cache, self.data_cache)", "def update_cache(self, repo=None, force=False):\n raise NotImplementedError(self.update_cache)", "def _do_flush(self, cache):\n try:\n while cache and not self._stop_flushing:\n key, value = cache.popitem()\n self._shelf[self._encode_key(key)] = value\n if cache:\n cache.clear()\n except BaseException as exception:\n self._flush_exception = exception", "def sync(self):\n\n # Ensure to rerun only once to avoid infinite loops caused by a constantly changing state value at each run.\n # Example: state.value += 1\n\n if self._state[\"is_rerun\"]:\n self._state[\"is_rerun\"] = False\n\n elif self._state[\"hash\"] is not None:\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(\n self._state[\"data\"], None\n ):\n self._state[\"is_rerun\"] = True\n self._state[\"session\"].request_rerun()\n\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "def refresh_all(self) -> None:\n self._update_thread.force_refresh_folder(self.feed_cache)", "def update_provenance(self):\n\n try:\n self._save_or_reregister_result(None)\n except InternalCacheStateError as e:\n self._raise_state_error_with_explanation(e)", "def sync(self):\r\n\r\n # Ensure to rerun only once to avoid infinite loops\r\n # caused by a constantly changing state value at each run.\r\n #\r\n # Example: state.value += 1\r\n if self._state[\"is_rerun\"]:\r\n self._state[\"is_rerun\"] = False\r\n\r\n elif self._state[\"hash\"] is not None:\r\n if self._state[\"hash\"] != self._state[\"hasher\"].to_bytes(self._state[\"data\"], None):\r\n self._state[\"is_rerun\"] = True\r\n self._state[\"session\"].request_rerun()\r\n\r\n self._state[\"hash\"] = self._state[\"hasher\"].to_bytes(self._state[\"data\"], None)", "def reset_cache(self):\n if self.cache_address is not None:\n for add in self.cache:\n os.remove(add + \".cd\")\n os.remove(add + \".cl\")\n self.cache = [None] * len(self)", "def flush():\n for k in cache._thecache.keys():\n del cache._thecache[k]", "def cache_clear():\r\n with lock:\r\n for value in cache.values():\r\n on_eviction(value[RESULT])\r\n cache.clear()\r\n root = nonlocal_root[0]\r\n root[:] = [root, root, None, None]\r\n stats[:] = [0, 0]", "def refresh(self, force_cache=False):\n if self.check_if_ok_to_update() or force_cache:\n for sync_name, sync_module in self.sync.items():\n _LOGGER.debug(\"Attempting refresh of sync %s\", sync_name)\n sync_module.refresh(force_cache=force_cache)\n if not force_cache:\n # Prevents rapid clearing of motion detect property\n self.last_refresh = int(time.time())\n return True\n return False", "def refresh(self): # noqa\n data = self.connection.hgetall(self.key)\n if not data:\n raise NoSuchJobError('No such job: {0}'.format(self.key))\n self.restore(data)" ]
[ "0.6510259", "0.63184017", "0.6079995", "0.6077812", "0.60450286", "0.5920667", "0.5860059", "0.58537775", "0.5784254", "0.5776172", "0.5739506", "0.57234544", "0.5665114", "0.56476337", "0.564716", "0.56465435", "0.5620647", "0.5612728", "0.55823725", "0.5579131", "0.5574888", "0.5574286", "0.5565036", "0.5557849", "0.55449605", "0.5540344", "0.5538831", "0.55213434", "0.5516927", "0.55164963" ]
0.75692886
0
Checks for any versioning errors, i.e., any cases where a task's function code was updated but its version annotation was not.
def _check_accessor_for_version_problems(self):
    old_prov = self._cache_accessor.load_provenance()
    if old_prov is None:
        return

    new_prov = self._cache_accessor.provenance
    if old_prov.exactly_matches(new_prov):
        return

    if old_prov.nominally_matches(new_prov):
        # If we have a nominal match but not an exact match, that means the
        # user must have changed a function's bytecode but not its version. To
        # report this, we first need to figure out which function changed. It
        # could be the one for this task, or it could be any immediate
        # non-persisted ancestor of this one. Fortunately, each provenance
        # contains links to each of its dependency digests, and a digest of a
        # non-persisted value contains that value's provenance, so we can
        # recursively search through our ancestor provenances until we find
        # which one caused the mismatch.
        def locate_mismatched_provenances_and_raise(old_prov, new_prov):
            assert old_prov.nominally_matches(new_prov)

            # If the bytecode doesn't match, we found the problematic pair.
            if old_prov.bytecode_hash != new_prov.bytecode_hash:
                message = f"""
                Found a cached artifact with the same descriptor
                ({self._cache_accessor.provenance.descriptor!r}) and version
                (major={old_prov.code_version_major!r},
                minor={old_prov.code_version_minor!r}),
                but created by different code.
                It appears that the code function that outputs
                {new_prov.descriptor}
                was changed (old bytecode hash {old_prov.bytecode_hash!r};
                new bytecode hash {new_prov.bytecode_hash!r})
                but the function's version number was not.
                Change @version(major=) to indicate that your function's
                behavior has changed, or @version(minor=) to indicate that
                it has *not* changed.
                """
                raise CodeVersioningError(oneline(message), new_prov.descriptor)

            # If the provenances nominally match, they must have essentially the
            # same structure.
            assert len(old_prov.dep_digests) == len(new_prov.dep_digests)

            # Since these provenances match nominally and have matching bytecode,
            # the mismatch must be in one of their dependencies. We'll iterate
            # through them to figure out which one.
            for old_dep_digest, new_dep_digest in zip(
                old_prov.dep_digests, new_prov.dep_digests
            ):
                # If this digest pair matches, it must not be where the problem is.
                if old_dep_digest.exact_hash == new_dep_digest.exact_hash:
                    continue

                # Not all digests have provenances, but these should. Digests of
                # non-persisted values have provenances, and if these were
                # persisted then their exact hashes would be the same as their
                # nominal hashes, so they would have matched above.
                old_dep_prov = old_dep_digest.provenance
                new_dep_prov = new_dep_digest.provenance
                locate_mismatched_provenances_and_raise(old_dep_prov, new_dep_prov)

            assert False

        try:
            locate_mismatched_provenances_and_raise(old_prov, new_prov)
        except AssertionError as e:
            message = f"""
            Encountered an internal error while performing an assisted
            versioning check. This should be impossible and is probably a bug
            in Bionic; please report this stack trace to the developers.
            However, it's also likely that you need to update the ``@version``
            annotation on the function that outputs
            {self._cache_accessor.provenance.descriptor}.
            If that doesn't fix the warning, you can try filtering the warning
            with ``warnings.filterwarnings``; deleting the disk cache; or
            disabling assisted versioning.
            """
            logger.warn(oneline(message), exc_info=e)

    self._cache_accessor.update_provenance()
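The core of such a check is comparing a stored code fingerprint against the current one while the declared version stays the same. Here is a small, self-contained sketch of that idea; the fingerprint (bytecode plus constants) and the version tuples are assumptions for the example, not the library's actual provenance format.

import hashlib


def code_fingerprint(fn):
    # A crude fingerprint: hash the compiled bytecode together with its
    # constants, so changing the function's logic changes the fingerprint.
    code = fn.__code__
    return hashlib.sha256(code.co_code + repr(code.co_consts).encode()).hexdigest()


def rule_v1(x):
    return x + 1


def rule_v2(x):
    return x + 2


# Pretend both revisions carry the same @version annotation (major=1, minor=0).
stored = {"version": (1, 0), "fingerprint": code_fingerprint(rule_v1)}
current = {"version": (1, 0), "fingerprint": code_fingerprint(rule_v2)}

if stored["version"] == current["version"] and stored["fingerprint"] != current["fingerprint"]:
    print("Function code changed but its version annotation was not bumped.")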
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _validate_missing_versioned_tags(module):\n version_table = getattr(module, '__version_table__', {})\n\n # Get all functions from the module.\n functions = inspect.getmembers(module, inspect.isfunction)\n functions_dict = dict(functions)\n function_names = list(functions_dict)\n\n # Get all classes from the module.\n classes = inspect.getmembers(module, inspect.isclass)\n classes_dict = dict(classes)\n class_names = list(classes_dict)\n\n for name in version_table.keys():\n msg = 'Both a versioned and unversioned `%s` exist in file `%s`!' % (name, module.__file__)\n class_pattern = re.compile('^%s%s' % (name, CLASS_SUFFIX_RE))\n func_pattern = re.compile('^%s%s' % (name, FUNCTION_SUFFIX_RE))\n class_matches = [class_pattern.search(_name) for _name in class_names if class_pattern.search(_name)]\n function_matches = [func_pattern.search(_name) for _name in function_names if func_pattern.search(_name)]\n\n # Check 1: @versioned() decorator on a function.\n # Check for duplicate names in classes or function names. Unversioned\n # functions appear in the funtions list whilst versioned appear in\n # the classes list. If the same name exists in both lists there's\n # a unversioned function.\n if (name in class_names or class_matches) and (name in function_names or function_matches):\n raise MissingDecoratorException(msg)\n\n # Check 2: @versioned(NUMBER) decorator on a function.\n # Versioned members are always a class due to the return of a\n # ProxyClass. If the name is in the version table there is a\n # decorated member. This filters decorated functions. If a function\n # is decorated and not decorated it shows in the functions list but\n # no longer in the classes list.\n if name not in class_names and name in function_names:\n raise MissingDecoratorException(msg)\n\n # Check 3: @versioned() or @versioned(NUMBER) decorator on a class.\n if name in class_names or class_matches:\n names_to_check = []\n # In case of suffix classes find all matching suffixed classes\n # to check.\n if class_matches:\n for match in class_matches:\n names_to_check.append(match.group())\n else:\n names_to_check.append(name)\n\n # Check if all the listed classes are versioned.\n for key in names_to_check:\n if not getattr(classes_dict[key], '_is_versioned', False):\n raise MissingDecoratorException(msg)", "def _validate_continuous_versioning(module):\n version_table = getattr(module, '__version_table__', {})\n\n # Loop all functions or classes with their given version mappings.\n for member_name, version_mapping in iteritems(version_table):\n # Get versions and sort them.\n versions = list(version_mapping['members'])\n versions.sort()\n\n # Check if there are gaps in the versions or if it does not start at 1.\n if versions != list(range(1, len(versions) + 1)):\n missing_versions = list(set(range(1, len(versions) + 1)) - set(versions))\n error = ('Versions need to be consecutive and start at `1`, missing version `%s`'\n ' for `%s` in file `%s`' % (\n missing_versions,\n member_name,\n module.__file__,\n ))\n raise InterruptedVersioningException(error)", "def check_versions(context, num=0, versions='', ecosystem='', package=''):\n versions = split_comma_separated_list(versions)\n vrsns = context.response.json()['items']\n assert len(vrsns) == num\n for v in vrsns:\n assert v['ecosystem'] == ecosystem\n assert v['package'] == package\n assert v['version'] in versions", "def invalid_versioned_targets(self):\n return self._invalid_versioned_targets", "def test_tooManyDotsToChangeVersionsScript(self):\n 
versionChanger = ChangeVersionsScript()\n self.assertRaises(SystemExit, versionChanger.main,\n [\"3.2.1.0\"])", "def test_version_rename_error_bad_version(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('version rename bad_version changed_name')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def is_valid_version(self):\n pass", "def test_version_missing(self):\r\n self.assertIsNone(self._version_test(self.no_version))", "def test_invalid_version_ints(self):\n self.assertRaises(ValueError, versions.Version, version='1a.2', name='foo')", "def check_all():\n for package, version in required_versions.items():\n try:\n module = importlib.import_module(package)\n except ImportError:\n return\n else:\n if StrictVersion(version) > StrictVersion(module.__version__):\n raise RuntimeError(\"Your version of %s is too old - it must be at least %s\" % (\n package,\n version,\n ))", "def vulnerable_versions(self):\n raise NotImplementedError()", "def test_check_version_non_release(self):\n with self.assertLogs(\"dakara_feeder.version\", \"DEBUG\") as logger:\n with patch.multiple(\n \"dakara_feeder.version\", __version__=\"0.1.0-dev\", __date__=\"1970-01-01\"\n ):\n check_version()\n\n # assert effect on logs\n self.assertListEqual(\n logger.output,\n [\n \"INFO:dakara_feeder.version:\" \"Dakara feeder 0.1.0-dev (1970-01-01)\",\n \"WARNING:dakara_feeder.version:\"\n \"You are running a dev version, use it at your own risks!\",\n ],\n )", "def check_version(ctx, builder, version_function, *,\n requires_version=None,\n requires_at_least_version=None,\n requires_at_most_version=None):\n if any(v is not None for v in (\n requires_version,\n requires_at_least_version,\n requires_at_most_version)):\n ctx.logger.check('checking %s version' % builder)\n\n version_str = version_function()\n\n # Convert the version into a tuple\n version = []\n for i in version_str.split('.'):\n try:\n version.append(int(i))\n except ValueError:\n # The subversion isn't a number, so just convert it to a\n # string.\n version.append(i)\n version = tuple(version)\n\n if requires_version is not None and requires_version != version:\n msg = 'version %s required; found %s' % (\n '.'.join(str(i) for i in requires_version), version_str)\n\n ctx.logger.failed(msg)\n raise fbuild.ConfigFailed(msg)\n\n if requires_at_least_version is not None and \\\n requires_at_least_version > version:\n msg = 'at least version %s required; found %s' % (\n '.'.join(str(i) for i in requires_at_least_version),\n version_str)\n\n ctx.logger.failed(msg)\n raise fbuild.ConfigFailed(msg)\n\n if requires_at_most_version is not None and \\\n requires_at_most_version < version:\n msg = 'at most version %s required; found %s' % (\n '.'.join(str(i) for i in requires_at_most_version),\n version_str)\n\n ctx.logger.failed(msg)\n raise fbuild.ConfigFailed(msg)\n\n ctx.logger.passed(version_str)", "def test_version_time_error_bad_version(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('version time bad_version \"%s\"'\n % self._test_date)\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def check_version(self, node):\n assert \"version\" in node, \"Version node does not contain attribute 'version'\"\n assert len(node[\"version\"]) >= 1, \"Expecting at least one 'version' value\"\n # TODO: add more thorough checks", "def test_no_version(self):\r\n errstring = \"unknown version\"\r\n with self.assertRaisesRegexp(ValueError, 
errstring):\r\n convert_between_versions(self.no_version, self.result_dir)", "def validate_versioned_image(images, instance_name, instance_version):\n print('Validating versioned instance {}, {}'.format(instance_name, instance_version))\n assert images['instance-type'].lower() == instance_name.lower()\n assert images['major-version'] == instance_version\n assert 'images' in images\n assert 'releases' in images\n assert 'min-tdc-version' in images\n assert 'hot-fix-ranges' in images\n\n # Collect all releases of instances\n releases = dict()\n for r in images.get('releases', list()):\n release_info = ReleaseInfo.add_release(instance_name, r, instance_version)\n releases[release_info.release_version] = release_info\n\n # Validate hot-fix range: each defined release should be in a hot-fix range\n hot_fixes = images.get('hot-fix-ranges', list())\n for rv in releases:\n found = False\n for fix_range in hot_fixes:\n if FlexVersion.in_range(rv, minv=fix_range['min'], maxv=fix_range['max']):\n found = True\n assert found, 'Release version {} of {} {} not in a valid hot-fix range' \\\n .format(rv, instance_name, instance_version)\n\n # Validate dependence min-max range: min <= max\n for release_info in releases.values():\n for dep in release_info.dependencies:\n res = FlexVersion.compares(dep.min_version, dep.max_version)\n assert res <= 0, 'Invalid min-max range [min: {}, max: {}] for version {} of {} {}' \\\n .format(dep.min_version, dep.max_version, release_info.release_version, instance_name, instance_version)", "def check_version(ctx, _, value):\n if not value or ctx.resilient_parsing:\n return\n\n click.echo(f\"geocube v{importlib.metadata.version('geocube')}\")\n\n ctx.exit()", "def validate(cfg: defs.Config) -> List[str]: # noqa: C901\n res: List[str] = []\n\n def check_branch(\n comp_name: str, branch_name: str, branch: Dict[str, defs.ComponentVersion]\n ) -> None:\n \"\"\"Validate versions within a single branch.\"\"\"\n uptodate_files: Dict[pathlib.Path, Tuple[pathlib.Path, defs.ComponentFile]] = {}\n\n if not RE_BRANCH_NAME.match(branch_name):\n res.append(f\"{comp_name}: Invalid branch name: {branch_name}\")\n\n for ver, version in sorted(branch.items()):\n if not RE_VERSION_STRING.match(ver):\n res.append(f\"{comp_name}/{branch_name}: Invalid version string: {ver}\")\n\n other_cksums, driver_cksums = _split_by_existence(comp_name, branch_name, version.files)\n if version.outdated:\n update_to = [\n o_version\n for o_version in branch.values()\n if not o_version.outdated\n and _split_by_existence(comp_name, branch_name, o_version.files)[0]\n == other_cksums\n ]\n if len(update_to) != 1:\n res.append(\n f\"{comp_name}/{branch_name}/{ver}: Got {len(update_to)} possible \"\n f\"versions to update to instead of exactly one\"\n )\n else:\n bad_files = sorted(\n relpath\n for relpath, (path, fdata) in driver_cksums.items()\n if util.file_sha256sum(path) != fdata.sha256\n )\n if bad_files:\n res.append(f\"{comp_name}/{branch_name}/{ver}: Bad checksum for {bad_files}\")\n\n if not uptodate_files:\n uptodate_files = driver_cksums\n elif uptodate_files != driver_cksums:\n res.append(\n f\"{comp_name}/{branch_name}: All the up-to-date versions should \"\n f\"define the same set of files with the same checksums\"\n )\n\n if not any(not version.outdated for version in branch.values()):\n res.append(f\"{comp_name}/{branch_name}: No non-outdated versions\")\n\n def check_component(comp_name: str, comp: defs.Component) -> None:\n \"\"\"Validate the definition of a single component.\"\"\"\n if not 
RE_COMP_NAME.match(comp_name):\n res.append(f\"Invalid component name: {comp_name}\")\n\n for branch_name, branch in sorted(comp.branches.items()):\n check_branch(comp_name, branch_name, branch)\n\n for comp_name, comp in sorted(cfg.all_components.components.items()):\n check_component(comp_name, comp)\n\n return res", "def test_version_remove_error_bad_version(self):\n test_name = sys._getframe().f_code.co_name\n rv, output = self._execute('version remove bad_version')\n self.assertEqual(2, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def verifyOpsForVersion(teal: List[TealComponent], version: int):\n for stmt in teal:\n if isinstance(stmt, TealOp):\n op = stmt.getOp()\n if op.min_version > version:\n raise TealInputError(\n \"Op not supported in TEAL version {}: {}. Minimum required version is {}\".format(\n version, op, op.min_version\n )\n )", "def test_invalid_version_fields(self):\n self.assertRaises(ValueError, versions.Version, version='1234', name='foo')", "def check_semver():\n body: t.Any = request.json\n check_error({'input': {'old': {}, 'new': {}}}, body)\n response_new = rpc_search({'input': body['input']['new']})\n response_old = rpc_search({'input': body['input']['old']})\n\n modules_new = response_new['yang-catalog:modules']['module']\n modules_old = response_old['yang-catalog:modules']['module']\n\n if len(modules_new) == 0 or len(modules_old) == 0:\n abort(404, description='No hits found either in old or new input')\n\n output_modules_list = []\n for mod_old in modules_old:\n name_new = None\n semver_new = None\n revision_new = None\n status_new = None\n name_old = mod_old['name']\n revision_old = mod_old['revision']\n organization_old = mod_old['organization']\n status_old = mod_old['compilation-status']\n for mod_new in modules_new:\n name_new = mod_new['name']\n revision_new = mod_new['revision']\n organization_new = mod_new['organization']\n status_new = mod_new['compilation-status']\n if name_new == name_old and organization_new == organization_old:\n if revision_old == revision_new:\n break\n semver_new = mod_new.get('derived-semantic-version')\n break\n if semver_new:\n semver_old = mod_old.get('derived-semantic-version')\n if semver_old:\n if semver_new != semver_old:\n output_mod = {}\n if status_old != 'passed' and status_new != 'passed':\n reason = 'Both modules failed compilation'\n elif status_old != 'passed' and status_new == 'passed':\n reason = 'Older module failed compilation'\n elif status_new != 'passed' and status_old == 'passed':\n reason = 'Newer module failed compilation'\n else:\n file_name = (\n f'{ac.w_yangcatalog_api_prefix}/services/file1={name_new}@{revision_new}'\n f'/check-update-from/file2={name_old}@{revision_old}'\n )\n reason = f'pyang --check-update-from output: {file_name}'\n\n diff = (\n f'{ac.w_yangcatalog_api_prefix}/services/diff-tree'\n f'/file1={name_old}@{revision_old}/file2={name_new}@{revision_new}'\n )\n output_mod['yang-module-pyang-tree-diff'] = diff\n\n output_mod['name'] = name_old\n output_mod['revision-old'] = revision_old\n output_mod['revision-new'] = revision_new\n output_mod['organization'] = organization_old\n output_mod['old-derived-semantic-version'] = semver_old\n output_mod['new-derived-semantic-version'] = semver_new\n output_mod['derived-semantic-version-results'] = reason\n diff = (\n f'{ac.w_yangcatalog_api_prefix}/services/diff-file'\n f'/file1={name_old}@{revision_old}/file2={name_new}@{revision_new}'\n )\n output_mod['yang-module-diff'] = diff\n 
output_modules_list.append(output_mod)\n if len(output_modules_list) == 0:\n abort(404, description='No different semantic versions with provided input')\n output = {'output': output_modules_list}\n return output", "def checkVersion(self):\n try:\n respInfo = self._reqSession.get(self._host + \"/static/pythonSDKVersion.txt\")\n if respInfo.status_code != 200 or len(respInfo.text) > 20:\n return\n latestVersion = respInfo.text.strip()\n import eventregistry._version as _version\n currentVersion = _version.__version__\n for (latest, current) in zip(latestVersion.split(\".\"), currentVersion.split(\".\")):\n if int(latest) > int(current):\n logger.info(\"==============\\nYour version of the module is outdated, please update to the latest version\")\n logger.info(\"Your version is %s while the latest is %s\", currentVersion, latestVersion)\n logger.info(\"Update by calling: pip install --upgrade eventregistry\\n==============\")\n return\n # in case the server mistakenly has a lower version that the user has, don't report an error\n elif int(latest) < int(current):\n return\n except:\n pass", "def validate_module_versioning(module_name):\n module = sys.modules[module_name]\n\n _validate_continuous_versioning(module)\n _validate_missing_versioned_tags(module)", "def test_version_check_outdated(self):\n output = self.run_command(\"selfupdate --check bennr01:selfupdate_test_outdated\", exitcode=0)\n self.assertIn(\"Target: bennr01:selfupdate_test_outdated\", output)\n self.assertNotIn(\"Target: ywangd:master\", output)\n self.assertIn(\"Already at latest version\", output)\n self.assertNotIn(\"New version available\", output)\n self.assertNotIn(\"Error: \", output)", "def _is_version_uptodate(self):\n logging.info(\"Checking tesseract version\")\n cmd = '%s -v' % (self.binary)\n logging.info(cmd) \n try:\n ret_output = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n except CalledProcessError:\n # Could not run tesseract\n error(self.msgs['TS_MISSING'])\n\n ver_str = '0.0.0'\n for line in ret_output.splitlines():\n if 'tesseract' in line:\n ver_str = line.split(' ')[1]\n if ver_str.endswith('dev'): # Fix for version strings that end in 'dev'\n ver_str = ver_str[:-3]\n\n # Iterate through the version dots\n ver = [int(x) for x in ver_str.split('.')]\n req = [int(x) for x in self.required.split('.')]\n\n # Aargh, in windows 3.02.02 is reported as version 3.02 \n # SFKM\n if str(os.name) == 'nt':\n req = req[:2]\n\n version_good = False\n for i,num in enumerate(req):\n if len(ver) < i+1:\n # This minor version number is not present in tesseract, so it must be\n # lower than required. 
(3.02 < 3.02.01)\n break\n if ver[i]==num and len(ver) == i+1 and len(ver)==len(req):\n # 3.02.02 == 3.02.02\n version_good = True\n continue\n if ver[i]>num:\n # 4.0 > 3.02.02\n # 3.03.02 > 3.02.02\n version_good = True\n break\n if ver[i]<num:\n # 3.01.02 < 3.02.02\n break\n \n return version_good, ver_str", "def _handle_version_argument(self, arguments):\n if '--version' in arguments:\n LOGGER.info('%s version %s', __name__, __version__)\n return exit(0)", "def _check_accessors_for_version_problems(self):\n\n accessors_needing_saving = []\n for accessor in self._cache_accessors:\n old_prov = accessor.load_provenance()\n\n if old_prov is None:\n continue\n\n new_prov = accessor.query.provenance\n\n if old_prov.exactly_matches(new_prov):\n continue\n accessors_needing_saving.append(accessor)\n\n if old_prov.code_version_minor == new_prov.code_version_minor:\n if old_prov.bytecode_hash != new_prov.bytecode_hash:\n raise CodeVersioningError(\n oneline(\n f\"\"\"\n Found a cached artifact with the same\n descriptor ({accessor.query.dnode.to_descriptor()!r}) and\n version (major={old_prov.code_version_major!r},\n minor={old_prov.code_version_minor!r}),\n But created by different code\n (old hash {old_prov.bytecode_hash!r},\n new hash {new_prov.bytecode_hash!r}).\n Did you change your code but not update the\n version number?\n Change @version(major=) to indicate that your\n function's behavior has changed, or @version(minor=)\n to indicate that it has *not* changed.\"\"\"\n )\n )\n\n for accessor in accessors_needing_saving:\n accessor.update_provenance()", "def check_package_version(self, node, ecosystem, package, version):\n assert \"package\" in node, \"'package' node is expected\"\n assert \"version\" in node, \"'version' node is expected\"\n self.check_package_part(node, ecosystem, package)\n self.check_version_part(node, ecosystem, package, version)\n # TODO: add more thorough checks" ]
[ "0.69742507", "0.61686295", "0.6032363", "0.59617966", "0.58357054", "0.5830695", "0.5827717", "0.5817524", "0.58131546", "0.5808405", "0.58020407", "0.57866913", "0.57857144", "0.57734126", "0.57588166", "0.56948197", "0.5692589", "0.56653947", "0.56652933", "0.56586355", "0.5641769", "0.5641158", "0.56338334", "0.5622077", "0.5606365", "0.5597684", "0.5594842", "0.5588328", "0.55821043", "0.5575799" ]
0.6633973
1
Reads (from disk or cloud) and saves (in memory) this task's value hash.
def _load_value_hash(self): artifact = self._cache_accessor.load_artifact() if artifact is None or artifact.content_hash is None: raise AssertionError( oneline( f""" Failed to load cached value (hash) for descriptor {self._cache_accessor.provenance.descriptor!r}. This suggests we did not successfully compute the task in a subprocess, or the entity wasn't cached; this should be impossible!""" ) ) self._result_value_hash = artifact.content_hash
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_digest(self, task: \"TaskView\") -> dict:\n # XXX user definition should be able to exclude inputs from digest\n # XXX might throw AttributeError\n inputs = task._resolved_inputs # type: ignore\n\n # sensitive values are always redacted so no point in including them in the digest\n # (for cleaner output and security-in-depth)\n keys = [\n k\n for k in inputs.keys()\n if k not in self.exclude_from_digest\n and not isinstance(inputs[k].resolved, sensitive)\n ]\n values = [inputs[key] for key in keys]\n\n for dep in task.dependencies:\n assert isinstance(dep, Dependency)\n if not isinstance(dep.expected, sensitive):\n keys.append(dep.expr)\n values.append(dep.expected)\n\n if keys:\n inputdigest = get_digest(values, manifest=task._manifest)\n else:\n inputdigest = \"\"\n\n digest = dict(digestKeys=\",\".join(keys), digestValue=inputdigest)\n task.logger.debug(\n \"digest for %s: %s=%s\", task.target.name, digest[\"digestKeys\"], inputdigest\n )\n return digest", "def _actual_hash(self):\n return hash_of_file(join(self._temp_path, self._downloaded_filename()))", "def hash(self):\n block = 1024 * 1024 * 4 # 4 MB.\n hasher = hashlib.sha256()\n\n with open(self.path, \"rb\") as f:\n while True:\n chunk = f.read(block)\n if not chunk:\n break\n hasher.update(hashlib.sha256(chunk).digest())\n\n digest = hasher.hexdigest()\n pdbox.debug(\"Hash for %s: %s\" % (self.path, digest))\n return digest", "def save_current_hash(self, comment: Optional[str] = None) -> None:\n assert len(self.hash_states) > 0\n hashval = self.hash_states[-1].hexdigest()\n scratch = self.workspace._get_local_scratch_space_for_resource(self.name)\n hashfile = join(scratch, \"hashval.txt\")\n with open(hashfile, \"w\") as f:\n f.write(hashval)\n if self.workspace.verbose:\n print(\"dws>> %s: wrote hashval of '%s' to %s'\" % (self.name, hashval, hashfile))\n commentfile = join(scratch, \"comment.txt\")\n if comment is not None:\n with open(commentfile, \"w\") as f:\n f.write(comment + \"\\n\")\n else:\n if exists(commentfile):\n os.remove(commentfile)", "def get_contents_hash(self):\n md5 = hashlib.md5()\n with closing(self.open()) as handle:\n for chunk in handle.chunks():\n md5.update(chunk)\n return md5.hexdigest()", "def save_hash_file(self):\n\n cid = time.time()\n '''\n f = open(self.file_hash_name, 'w+')\n f.write(str(cid))\n f.close()\n '''\n\n with open(self.file_hash_name, \"w\") as f:\n logger.debug('CID salvo file {}'.format(cid))\n fcntl.flock(f, fcntl.LOCK_EX | fcntl.LOCK_NB)\n f.write(str(cid))\n fcntl.flock(f, fcntl.LOCK_UN)\n\n self.set_cache_hash(cid)", "def existing_hash(self, id):\r\n return self._read_sha_by_id(id)", "def hash_file(hash_obj, path):\n with open(path, 'rb') as f:\n while True:\n b = f.read(8192)\n if not b:\n break\n hash_obj.update(b)\n return hash_obj", "def get(self, key):\n if key in self.cache:\n return self.cache[key]\n valueat,valuelen = self.keys[key]\n valuedump = self.file.readp(valueat, valuelen)\n value = pickle.loads(valuedump)\n self.cache[key] = value\n return value", "def _hash_and_save(self, node):\n validate_is_bin_node(node)\n\n node_hash = keccak(node)\n self.db[node_hash] = node\n return node_hash", "def _get_hash(self, path):\n with open(path, \"r\") as fp:\n content = fp.read()\n\n return sha256(content).hexdigest()", "def get_hash(self):\r\n return", "def hash(self):\n return self._obs_file.hash()", "def compute(self):\n self.checksum = self.get_files_hashes_in_path()\n self.real_checksum = self.checksum\n # This appends the filename when checksum was made for 
a single file.\n # We need to get this when testing the consistency on the moment of\n # restore.\n if self.count == 1:\n self.checksum = self.real_checksum + os.path.basename(self.path)\n return self.checksum", "def hashOfTempFile(self):\n if os.path.exists(\"temp.dat\"):\n with open(\"temp.dat\", 'r') as f:\n data = f.read()\n return hashlib.md5(data).hexdigest()\n else:\n return \"noTempFile\"", "def getHash(self):\n if self.chash:\n return self.chash\n else:\n self.setHash()\n return self.chash", "def fetch(self, hash):\n return self.r.get(hash)", "def file_hash(load, fnd):\n if \"env\" in load:\n # \"env\" is not supported; Use \"saltenv\".\n load.pop(\"env\")\n\n ret = {}\n\n if \"saltenv\" not in load:\n return ret\n\n if \"path\" not in fnd or \"bucket\" not in fnd or not fnd[\"path\"]:\n return ret\n\n cached_file_path = _get_cached_file_name(\n fnd[\"bucket\"], load[\"saltenv\"], fnd[\"path\"]\n )\n\n if os.path.isfile(cached_file_path):\n ret[\"hsum\"] = salt.utils.hashutils.get_hash(cached_file_path)\n ret[\"hash_type\"] = \"md5\"\n\n return ret", "def _hash_file_content(self, path):\n hasher = hashlib.sha1()\n with open(path, 'rb') as file:\n buffer = file.read(self.hash_block_size)\n while len(buffer) > 0:\n hasher.update(buffer)\n buffer = file.read(self.hash_block_size)\n return hasher.hexdigest()", "def current_hash(self):", "def hash_file(self, file_path, file_arcname):\n\n file_path = os.path.abspath(file_path)\n\n # If the file_arcname argument is None use the base file name as the\n # arc name\n if file_arcname is None:\n file_arcname = os.path.basename(file_path)\n\n if not os.path.exists(file_path):\n task_error(\"%s doesn't exist\" % file_path)\n if not os.access(file_path, os.R_OK):\n task_error(\"Can't read from %s\" % file_path)\n\n file_mode = os.stat(file_path)[stat.ST_MODE]\n if not stat.S_ISDIR(file_mode) and not stat.S_ISREG(file_mode):\n task_error(\"Unknown file type for %s\" % file_path)\n\n file_in = None\n try:\n # open to read binary. 
This is important.\n file_in = open(file_path, 'rb')\n except IOError:\n task_error(\"Couldn't read from file: %s\" % file_path)\n\n # hash file 1Mb at a time\n hashval = hashlib.sha1()\n while True:\n data = file_in.read(1024 * 1024)\n if not data:\n break\n hashval.update(data)\n\n # update file bundle status\n\n self.running_size += len(data)\n\n self.percent_complete = 100.0 * self.running_size / self.bundle_size\n\n # only update significant progress\n if self.percent_complete - self.last_percent > 1:\n self.report_percent_complete()\n self.last_percent = self.percent_complete\n\n file_hash = hashval.hexdigest()\n\n # print 'hash: ' + file_hash\n file_in.close()\n\n modified_name = os.path.join('data', file_arcname)\n (file_dir, file_name) = os.path.split(modified_name)\n\n # linuxfy the directory\n file_dir = file_dir.replace('\\\\', '/')\n\n info = {}\n info['size'] = os.path.getsize(file_path)\n mime_type = mimetypes.guess_type(file_path, strict=True)[0]\n\n info['mimetype'] = mime_type if mime_type is not None else 'application/octet-stream'\n info['name'] = file_name\n info['mtime'] = DT.datetime.utcfromtimestamp(int(os.path.getmtime(file_path))).isoformat()\n info['ctime'] = DT.datetime.utcfromtimestamp(int(os.path.getctime(file_path))).isoformat()\n info['destinationTable'] = 'Files'\n info['subdir'] = file_dir\n info['hashsum'] = file_hash\n info['hashtype'] = 'sha1'\n\n # todo make sure errors bubble up without crashing\n if file_arcname in self.file_meta:\n print file_arcname\n task_error(\n \"Different file with the same arcname is already in the bundle\")\n return\n\n return info", "def spoof_prev_val_cache():\n f = open(Filenames.VALUE_CACHE, 'w')\n # {GlobalJobId: { classad field: [val, timestamp], ...}, ... }\n j = {\n \"uclhctest.t2.ucsd.edu#265.49#1452298787\": {\n \"RemoteUserCpu\": [89568.0, 1454011005]\n }\n }\n json.dump(j, f)\n f.close()", "def sync_after_remote_computation(self):\n\n # If this state was never initialized, it doesn't have any out-of-date\n # information, so there's no need to update anything.\n if not self.is_initialized:\n return\n\n assert self.should_persist\n\n # First, let's flush the stored entries in our cache accessor. Since we just\n # computed this entry in a subprocess, there should be a new cache entry that\n # isn't reflected yet in our local accessor.\n # (We don't just call self.refresh_cache_accessors() because we don't\n # particularly want to do the cache versioning check -- it's a little late to\n # do anything if it fails now.)\n self._cache_accessor.flush_stored_entries()\n\n # Then, populate the value hashes.\n if self._result_value_hash is None:\n self._load_value_hash()", "def ondisk_digest(self):\n with open(self.rename_phase_src) as f:\n return hasher(f.read()).hexdigest()", "def hash(self) -> str:\n return pulumi.get(self, \"hash\")", "def sync(self):\n _yaml = self.hashable.yaml()\n if self.path.exists():\n _yaml_on_disk = self.path.read_text()\n if _yaml_on_disk != _yaml:\n e.code.CodingError(\n msgs=[\n \"Info file mismatch ... should never happen\",\n \"State on disk: \",\n [_yaml_on_disk],\n \"State in memory: \",\n [_yaml],\n ]\n )\n else:\n # handle info file and make it read only\n # ... write hashable info\n self.path.write_text(_yaml)\n # ... 
make read only as done only once\n util.io_make_path_read_only(self.path)", "def get_hash(self):\n return self.__hash", "def _HashFile(self, fd):\n hashes = fd.Get(fd.Schema.HASH)\n if hashes:\n found_all = True\n for fingerprint_type, hash_types in self.HASH_TYPES.iteritems():\n for hash_type in hash_types:\n if fingerprint_type == \"pecoff\":\n hash_type = \"pecoff_%s\" % hash_type\n if not hashes.HasField(hash_type):\n found_all = False\n break\n if not found_all:\n break\n if found_all:\n return hashes\n\n fingerprinter = fingerprint.Fingerprinter(fd)\n if \"generic\" in self.HASH_TYPES:\n hashers = self._GetHashers(self.HASH_TYPES[\"generic\"])\n fingerprinter.EvalGeneric(hashers=hashers)\n if \"pecoff\" in self.HASH_TYPES:\n hashers = self._GetHashers(self.HASH_TYPES[\"pecoff\"])\n if hashers:\n fingerprinter.EvalPecoff(hashers=hashers)\n\n if not hashes:\n hashes = fd.Schema.HASH()\n\n for result in fingerprinter.HashIt():\n fingerprint_type = result[\"name\"]\n for hash_type in self.HASH_TYPES[fingerprint_type]:\n if hash_type not in result:\n continue\n\n if hash_type == \"SignedData\":\n # There can be several certs in the same file.\n for signed_data in result[hash_type]:\n hashes.signed_data.Append(revision=signed_data[0],\n cert_type=signed_data[1],\n certificate=signed_data[2])\n continue\n\n # Set the hashes in the original object\n if fingerprint_type == \"generic\":\n hashes.Set(hash_type, result[hash_type])\n\n elif fingerprint_type == \"pecoff\":\n hashes.Set(\"pecoff_%s\" % hash_type, result[hash_type])\n\n else:\n logging.error(\"Unknown fingerprint_type %s.\", fingerprint_type)\n\n try:\n fd.Set(hashes)\n except IOError:\n pass\n return hashes", "def read_data(self, hashVal: NUMPY_10_DataHashSpec) -> np.ndarray:\n srcSlc = (hashVal.collection_idx, *[slice(0, x) for x in hashVal.shape])\n try:\n res = self.Fp[hashVal.uid][srcSlc]\n except TypeError:\n self.Fp[hashVal.uid] = self.Fp[hashVal.uid]()\n res = self.Fp[hashVal.uid][srcSlc]\n except KeyError:\n process_dir = self.STAGEDIR if self.mode == 'a' else self.STOREDIR\n if Path(process_dir, f'{hashVal.uid}.npy').is_file():\n file_pth = self.DATADIR.joinpath(f'{hashVal.uid}.npy')\n self.rFp[hashVal.uid] = open_memmap(file_pth, 'r')\n res = self.Fp[hashVal.uid][srcSlc]\n else:\n raise\n\n out = np.array(res, dtype=res.dtype, order='C')\n if xxh64_hexdigest(out) != hashVal.checksum:\n raise RuntimeError(\n f'DATA CORRUPTION Checksum {xxh64_hexdigest(out)} != recorded {hashVal}')\n return out", "def hash(self) -> bytes:" ]
[ "0.61814857", "0.5982808", "0.5848447", "0.5757408", "0.5693065", "0.5498911", "0.54744655", "0.5450672", "0.5423203", "0.54196274", "0.5387745", "0.53874344", "0.53849286", "0.5367149", "0.5363407", "0.53323734", "0.5316272", "0.52829957", "0.52800924", "0.5279477", "0.5273966", "0.5272517", "0.5265391", "0.52559274", "0.5254929", "0.5251628", "0.52242", "0.52090585", "0.519708", "0.51841265" ]
0.65436584
0
Returns copies of the provided TaskStates with any unnecessary state and ancestors "stripped" off; these copies can be safely transmitted to another process for computation.
def strip_states(self, states): stripped_states_by_task_key = {} def strip_state(original_state): """Returns a stripped copy of a TaskState.""" task_key = original_state.task_key if task_key in stripped_states_by_task_key: return stripped_states_by_task_key[task_key] assert original_state in self.all_states assert original_state not in self.non_serializable_states # Make a copy of the TaskState, which we'll strip down to make it # easier to serialize. # (This is a shallow copy, so we'll make sure to avoid mutating any of # its member variables.) stripped_state = copy.copy(original_state) stripped_states_by_task_key[task_key] = stripped_state # Strip out data cached in memory -- we can't necessarily pickle it, so # we need to get rid of it before trying to transmit this state to # another process. stripped_state._result = None # External dependency states are expected to be already completed, so we # don't need to include their task information or any of their dependencies. if original_state in self.external_dependency_states: stripped_state.task = None stripped_state.func_attrs = None stripped_state.dep_states = [] # Otherwise, we'll recursively strip all the dependency states as well. else: stripped_state.dep_states = [ strip_state(dep_state) for dep_state in original_state.dep_states ] # We also strip and include any followup states. stripped_state.followup_states = [ strip_state(followup_state) for followup_state in original_state.followup_states ] return stripped_state return [strip_state(state) for state in states]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strip_state(original_state):\n\n task_key = original_state.task_key\n if task_key in stripped_states_by_task_key:\n return stripped_states_by_task_key[task_key]\n\n assert original_state in self.all_states\n assert original_state not in self.non_serializable_states\n\n # Make a copy of the TaskState, which we'll strip down to make it\n # easier to serialize.\n # (This is a shallow copy, so we'll make sure to avoid mutating any of\n # its member variables.)\n stripped_state = copy.copy(original_state)\n stripped_states_by_task_key[task_key] = stripped_state\n\n # Strip out data cached in memory -- we can't necessarily pickle it, so\n # we need to get rid of it before trying to transmit this state to\n # another process.\n stripped_state._result = None\n\n # External dependency states are expected to be already completed, so we\n # don't need to include their task information or any of their dependencies.\n if original_state in self.external_dependency_states:\n stripped_state.task = None\n stripped_state.func_attrs = None\n stripped_state.dep_states = []\n\n # Otherwise, we'll recursively strip all the dependency states as well.\n else:\n stripped_state.dep_states = [\n strip_state(dep_state) for dep_state in original_state.dep_states\n ]\n\n # We also strip and include any followup states.\n stripped_state.followup_states = [\n strip_state(followup_state)\n for followup_state in original_state.followup_states\n ]\n\n return stripped_state", "def prune_states(state: PushState) -> PushState:\n if state and not state[-1]:\n return PushGame.prune_states(state[:-1])\n else:\n return state", "def get_tasks_without_predecessors(self) -> TaskList:\n return TaskList([task for task in self._tasks if not len(task.predecessors)])", "def state_to_task(self, states):\n tasks = states\n return tasks", "def states(self):\n from copy import copy\n return copy(self._states_)", "def null_closure(self, states):\n closure = list(states)\n unchecked = list(states)\n while unchecked:\n state = unchecked.pop()\n null_transitions = self.move([state], NULL)\n for transition in null_transitions:\n if transition not in closure:\n closure.append(transition)\n unchecked.append(transition)\n return sorted(closure)", "def fix_invalid_state(self):\r\n\r\n # If we are on a task that is greater than the number of available tasks,\r\n # it is an invalid state. 
If the current task number is greater than the number of tasks\r\n # we have in the definition, our state is invalid.\r\n if self.current_task_number > len(self.task_states) or self.current_task_number > len(self.task_xml):\r\n self.current_task_number = max(min(len(self.task_states), len(self.task_xml)) - 1, 0)\r\n #If the length of the task xml is less than the length of the task states, state is invalid\r\n if len(self.task_xml) < len(self.task_states):\r\n self.current_task_number = len(self.task_xml) - 1\r\n self.task_states = self.task_states[:len(self.task_xml)]\r\n\r\n if not self.old_task_states and not self.task_states:\r\n # No validation needed when a student first looks at the problem\r\n return\r\n\r\n # Pick out of self.task_states and self.old_task_states the state that is\r\n # a) valid for the current task definition\r\n # b) not the result of a reset due to not having a valid task state\r\n # c) has the highest total score\r\n # d) is the most recent (if the other two conditions are met)\r\n\r\n valid_states = [\r\n task_states\r\n for task_states\r\n in self.old_task_states + [self.task_states]\r\n if (\r\n len(self.validate_task_states(self.task_xml, task_states)) == 0 and\r\n not self.is_reset_task_states(task_states)\r\n )\r\n ]\r\n\r\n # If there are no valid states, don't try and use an old state\r\n if len(valid_states) == 0:\r\n # If this isn't an initial task state, then reset to an initial state\r\n if not self.is_reset_task_states(self.task_states):\r\n self.reset_task_state('\\n'.join(self.validate_task_states(self.task_xml, self.task_states)))\r\n\r\n return\r\n\r\n sorted_states = sorted(enumerate(valid_states), key=self.states_sort_key, reverse=True)\r\n idx, best_task_states = sorted_states[0]\r\n\r\n if best_task_states == self.task_states:\r\n return\r\n\r\n log.warning(\r\n \"Updating current task state for %s to %r for student with anonymous id %r\",\r\n self.system.location,\r\n best_task_states,\r\n self.system.anonymous_student_id\r\n )\r\n\r\n self.old_task_states.remove(best_task_states)\r\n self.old_task_states.append(self.task_states)\r\n self.task_states = best_task_states\r\n\r\n # The state is ASSESSING unless all of the children are done, or all\r\n # of the children haven't been started yet\r\n children = [json.loads(child) for child in best_task_states]\r\n if all(child['child_state'] == self.DONE for child in children):\r\n self.state = self.DONE\r\n elif all(child['child_state'] == self.INITIAL for child in children):\r\n self.state = self.INITIAL\r\n else:\r\n self.state = self.ASSESSING\r\n\r\n # The current task number is the index of the last completed child + 1,\r\n # limited by the number of tasks\r\n last_completed_child = next((i for i, child in reversed(list(enumerate(children))) if child['child_state'] == self.DONE), 0)\r\n self.current_task_number = min(last_completed_child + 1, len(best_task_states) - 1)", "def removeLines(self) -> List['StateNode']:\n lines = self.state[0]\n states: List[StateNode] = []\n for i in range(len(lines)):\n for j in range(i + 1, len(lines) + 1):\n new_lines = lines[:i] + lines[j:]\n if len(new_lines) == 0:\n continue\n states.append(StateNode(self.table, \n (new_lines, self.state[1]),\n (lines[i:j], []),\n self.cost + len(self.state[1]),\n self))\n return states", "def _unprune_referenced_sub_workflows(self, keep_paths, prune_paths):\n\n keep_nodes = frozenset([path[-1] for path in keep_paths])\n\n shift_path_indexes = frozenset(\n idx for (idx, path) in enumerate(prune_paths)\n if any(node in 
keep_nodes for node in path))\n\n if not shift_path_indexes:\n return (keep_paths, prune_paths)\n\n for idx in shift_path_indexes:\n node = prune_paths[idx][-1]\n logger.info(\n \"Keeping node %s.%s because it is downstream of an --only-nodes argument\",\n node[0],\n node[1])\n\n return self._unprune_referenced_sub_workflows(\n keep_paths + [prune_paths[i] for i in shift_path_indexes],\n [path for (i, path) in enumerate(prune_paths) if i not in shift_path_indexes])", "def __cleanState__(self, stateDict):\n for k in list(stateDict.keys()):\n if k.startswith('_'):\n stateDict.pop(k)\n return stateDict", "def remove_unreachable_states(mona_data):\n\n # Remove all cases in the transition dict where the state is either a source or a destination\n def remove_state_from_transition_dict(transition_dict, state):\n return {(source, dest): label\n for (source, dest), label in transition_dict.items()\n if source != state and dest != state}\n\n # Decrements a state name if the value < filter state\n def filter_and_transform(state, filter_state):\n return str((int(state) - 1)) if int(state) > int(filter_state) else str(state)\n\n # decrement state name\n def decrement_state_name(transition_dict, state):\n return {(filter_and_transform(source, state), filter_and_transform(dest, state)): label\n for (source, dest), label in transition_dict.items()}\n\n states_to_remove = []\n\n # As per convention, only rejecting states can be unreachable\n for state in mona_data['rejecting_states']:\n\n # Check if this state cannot reach an accepting/reporting state\n # For now, we assume that only states that are not reporting and have\n # no outgoing (no self-referential) edges, cannot reach reporting\n unreachable = True\n for (source, dest), label in mona_data['transition_dict'].items():\n if source == state and dest != state:\n unreachable = False\n \n # If unreachable, remove the state\n if unreachable:\n states_to_remove.append(state)\n\n\n for state in states_to_remove:\n\n # Remove state from states\n assert state in mona_data['states']\n mona_data['states'].remove(state)\n mona_data['states'] = [filter_and_transform(state_i, state) for state_i in mona_data['states']]\n\n # Reduce num_states by one\n mona_data['num_states'] -= 1\n\n # Remove unreachable state and update rejecting states\n assert state in mona_data['rejecting_states']\n mona_data['rejecting_states'].remove(state)\n mona_data['rejecting_states'] = set([filter_and_transform(state_i, state) for state_i in mona_data['rejecting_states']])\n\n # Remove unreachable state and update relevant transitions\n mona_data['transition_dict'] = remove_state_from_transition_dict(mona_data['transition_dict'], state)\n mona_data['transition_dict'] = decrement_state_name(mona_data['transition_dict'], state)\n \n # Remove unreachable state and update dont_care transitions\n if state in mona_data['dont_care_states']:\n mona_data['dont_care_states'].remove(state)\n mona_data['dont_care_states'] = set([filter_and_transform(state_i, state) for state_i in mona_data['dont_care_states']])\n\n #Update Accepting states\n mona_data['accepting_states'] = [filter_and_transform(state_i, state) for state_i in mona_data['accepting_states']]", "def split_transitions(self):\n new = self.empty_copy()\n for state in self.states():\n new.add_state(FSMState((state, ()), is_initial=state.is_initial,\n is_final=state.is_final))\n for transition in self.transitions():\n for j in range(len(transition.word_in)-1):\n new.add_transition((\n (transition.from_state, 
tuple(transition.word_in[:j])),\n (transition.from_state, tuple(transition.word_in[:j+1])),\n transition.word_in[j],\n []))\n new.add_transition((\n (transition.from_state, tuple(transition.word_in[:-1])),\n (transition.to_state, ()),\n transition.word_in[-1:],\n transition.word_out))\n return new", "def remove_transactions(self, txs: list[BaseTransaction]) -> None:\n parents_to_update: dict[bytes, list[bytes]] = defaultdict(list)\n dangling_children: set[bytes] = set()\n txset = {not_none(tx.hash) for tx in txs}\n for tx in txs:\n assert tx.hash is not None\n tx_meta = tx.get_metadata()\n assert not tx_meta.validation.is_checkpoint()\n for parent in set(tx.parents) - txset:\n parents_to_update[parent].append(tx.hash)\n dangling_children.update(set(tx_meta.children) - txset)\n for spending_txs in tx_meta.spent_outputs.values():\n dangling_children.update(set(spending_txs) - txset)\n for tx_input in tx.inputs:\n spent_tx = tx.get_spent_tx(tx_input)\n spent_tx_meta = spent_tx.get_metadata()\n if tx.hash in spent_tx_meta.spent_outputs[tx_input.index]:\n spent_tx_meta.spent_outputs[tx_input.index].remove(tx.hash)\n self.save_transaction(spent_tx, only_metadata=True)\n assert not dangling_children, 'It is an error to try to remove transactions that would leave a gap in the DAG'\n for parent_hash, children_to_remove in parents_to_update.items():\n parent_tx = self.get_transaction(parent_hash)\n parent_meta = parent_tx.get_metadata()\n for child in children_to_remove:\n parent_meta.children.remove(child)\n self.save_transaction(parent_tx, only_metadata=True)\n for tx in txs:\n self.log.debug('remove transaction', tx=tx.hash_hex)\n self.remove_transaction(tx)", "def collect_garbage(results, task, visited_nodes, targets, dag):\n for ancestor in dag.predecessors(task):\n is_obsolete = all(\n successor in visited_nodes for successor in dag.successors(ancestor)\n )\n\n if is_obsolete and ancestor not in targets:\n del results[ancestor]\n\n return results", "def split(self, states_and_tasks):\n self._assert_is_batched(states_and_tasks)\n return self._tf_call(self._split, states_and_tasks)", "def _find_inaccessible_workflows(self, prune_nodes):\n\n referrer_map = self._build_referrer_map()\n\n removed_referring_nodes = frozenset(\n node for referrers in referrer_map.values()\n for node in referrers\n if node in prune_nodes)\n\n return frozenset(\n workflow for (workflow, referrers) in six.iteritems(referrer_map)\n if all(referrer in removed_referring_nodes for referrer in referrers))", "def remove_states(self, keys: list):\n if self.spec.graph:\n self.spec.graph.clear_children(keys)", "def reset(self, rng):\n tp = self.task_params\n g = self.task.env.task.graph\n env = self.task.env\n task = self.task\n init_states, goal_states, dists, paths = [], [], [], []\n for i in range(tp.batch_size):\n s, e, path = g.sample_random_goal(rng, tp.min_dist, tp.max_dist)\n # Compute distance to goal from all nodes.\n dist = g.get_path_distance([e])\n # Compute atleast one path between the source and the goal (to sample\n # demonstrations from).\n \n init_states.append(s)\n goal_states.append(e)\n dists.append(dist)\n paths.append(path)\n \n task.init_states, task.goal_states, task.dists, task.paths = \\\n init_states, goal_states, dists, paths\n task.history_f = []\n _ = env.reset(rng, init_states=init_states, batch_size=tp.batch_size)\n return init_states", "def dirty(self) -> IdentitySet:\n return IdentitySet(\n [\n state.obj()\n for state in self._dirty_states\n if state not in self._deleted\n ]\n )", "def 
remove_unconnected_transitions(net):\n transitions = list(net.transitions)\n i = 0\n while i < len(transitions):\n if len(transitions[i].in_arcs) == 0 and len(transitions[i].out_arcs) == 0:\n remove_transition(net, transitions[i])\n i = i + 1\n return net", "def clean_tmatrix(transition_matrix, rm_absorbing=True):\n t_matrix = deepcopy(transition_matrix)\n n_states = len(transition_matrix)\n\n # Removing the non-visited states and absorbing states\n removed_states = []\n for index in range(n_states - 1, -1, -1):\n if not any(t_matrix[index]): # non-visited\n t_matrix = np.delete(t_matrix, index, axis=1)\n t_matrix = np.delete(t_matrix, index, axis=0)\n removed_states.append(index)\n elif t_matrix[index, index] == 1.0: # absorbing state\n if not all([t_matrix[index, j] == 0.0 for j in range(n_states) if j != index]):\n raise ValueError(\n \"The sum of the elements in a row of the \\\n transition matrix must be one\"\n )\n t_matrix = np.delete(t_matrix, index, axis=1)\n t_matrix = np.delete(t_matrix, index, axis=0)\n removed_states.append(index)\n\n # Renormalizing just in case\n t_matrix = normalize_markov_matrix(t_matrix)\n\n return t_matrix, removed_states", "def _strip_workflow_nodes(self, workflow, graph):\n original_workflow_node_names = frozenset(\n wf['name'] for wf in self.get_all_nodes(workflow))\n keyed_nodes = {node.name: node for node in graph.nodes()}\n\n def strip_section_operators(operators):\n result = []\n for operator in operators:\n if operator['name'] not in keyed_nodes:\n continue\n\n dependencies = [\n node.name\n for node in graph.predecessors(keyed_nodes[operator['name']])\n if node.name in original_workflow_node_names\n ]\n\n new_operator = operator.copy()\n new_operator['upstream_dependencies'] = dependencies\n if not dependencies:\n new_operator.pop('upstream_dependencies')\n\n # Remove any downstream dependencies that may have been specified\n # in the original graph, because we will use upstream dependencies\n # (arbitarily) as the mechanism for specifying all dependencies\n if 'downstream_dependencies' in new_operator:\n new_operator.pop('downstream_dependencies')\n\n result.append(new_operator)\n\n return result\n\n new_workflow = workflow.copy()\n\n for section_name in ['before', 'after', 'operators', 'generators', 'sub_dags']:\n if section_name not in workflow:\n continue\n\n new_section = strip_section_operators(workflow[section_name])\n if new_section:\n new_workflow[section_name] = new_section\n logger.debug('New workflow section %s: %s',\n section_name, new_section)\n else:\n new_workflow.pop(section_name)\n logger.debug('Removing workflow section %s', section_name)\n\n return new_workflow", "def reset_task_state(self) -> None:\n self.set_task_state(task_state=self.sample_task_state())", "def flat(self):\n to_clean = []\n while self.nodes:\n head, children = self.nodes.popitem(0)\n to_clean.extend([x for x in self._get_leafs(head, children)])\n return to_clean", "def revert_state(self):\n if self.previous_states > 0: # checks for empty\n self.update_status(self.previous_states.pop())", "def optimize_states(old_state, committed_state, new_state):\n old = old_state['actions']\n committed = committed_state['actions']\n new = new_state['actions']\n\n old, new, committed = map(optimize_actions, [old, new, committed])\n\n old_state['actions'] = old\n committed_state['actions'] = committed\n new_state['actions'] = new", "def _dirty_states(self) -> Iterable[InstanceState[Any]]:\n return self.identity_map._dirty_states()", "def removeAllTasks(self):\n for 
taskName in self.runningTasks:\n taskMgr.remove(taskName)", "def removeDuplicateTasks(tasks):\n if len(tasks) < 2:\n return tasks\n uniqueTasks = []\n\n for t in tasks:\n haveSeenT = findTaskInList(t, uniqueTasks)\n if not haveSeenT:\n uniqueTasks.append(t)\n\n return uniqueTasks", "def remove_nodes(self, nodes):\n for node in nodes:\n for arc in node.entries:\n arc.src.exits.remove(arc)\n self.arcs.remove(arc)\n for arc in node.exits:\n arc.dest.entries.remove(arc)\n self.arcs.remove(arc)\n self.nodes.remove(node)\n dangling_nodes = []\n for node in self.nodes:\n if node == self.start or node == self.end:\n pass\n else:\n if not node.exits or not node.entries:\n dangling_nodes.append(node)\n if dangling_nodes:\n self.remove_nodes(dangling_nodes)" ]
[ "0.7007315", "0.6059291", "0.57722616", "0.5648813", "0.54974157", "0.54269755", "0.53582656", "0.53353125", "0.52991706", "0.52278084", "0.5219097", "0.5181115", "0.5153454", "0.5147667", "0.5111569", "0.51057416", "0.5078182", "0.5068822", "0.5066051", "0.49776638", "0.4975756", "0.49648127", "0.4957759", "0.495509", "0.49450108", "0.49379882", "0.4892734", "0.4890007", "0.48669925", "0.48630396" ]
0.84246093
0
Returns a stripped copy of a TaskState.
def strip_state(original_state): task_key = original_state.task_key if task_key in stripped_states_by_task_key: return stripped_states_by_task_key[task_key] assert original_state in self.all_states assert original_state not in self.non_serializable_states # Make a copy of the TaskState, which we'll strip down to make it # easier to serialize. # (This is a shallow copy, so we'll make sure to avoid mutating any of # its member variables.) stripped_state = copy.copy(original_state) stripped_states_by_task_key[task_key] = stripped_state # Strip out data cached in memory -- we can't necessarily pickle it, so # we need to get rid of it before trying to transmit this state to # another process. stripped_state._result = None # External dependency states are expected to be already completed, so we # don't need to include their task information or any of their dependencies. if original_state in self.external_dependency_states: stripped_state.task = None stripped_state.func_attrs = None stripped_state.dep_states = [] # Otherwise, we'll recursively strip all the dependency states as well. else: stripped_state.dep_states = [ strip_state(dep_state) for dep_state in original_state.dep_states ] # We also strip and include any followup states. stripped_state.followup_states = [ strip_state(followup_state) for followup_state in original_state.followup_states ] return stripped_state
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def strip_states(self, states):\n\n stripped_states_by_task_key = {}\n\n def strip_state(original_state):\n \"\"\"Returns a stripped copy of a TaskState.\"\"\"\n\n task_key = original_state.task_key\n if task_key in stripped_states_by_task_key:\n return stripped_states_by_task_key[task_key]\n\n assert original_state in self.all_states\n assert original_state not in self.non_serializable_states\n\n # Make a copy of the TaskState, which we'll strip down to make it\n # easier to serialize.\n # (This is a shallow copy, so we'll make sure to avoid mutating any of\n # its member variables.)\n stripped_state = copy.copy(original_state)\n stripped_states_by_task_key[task_key] = stripped_state\n\n # Strip out data cached in memory -- we can't necessarily pickle it, so\n # we need to get rid of it before trying to transmit this state to\n # another process.\n stripped_state._result = None\n\n # External dependency states are expected to be already completed, so we\n # don't need to include their task information or any of their dependencies.\n if original_state in self.external_dependency_states:\n stripped_state.task = None\n stripped_state.func_attrs = None\n stripped_state.dep_states = []\n\n # Otherwise, we'll recursively strip all the dependency states as well.\n else:\n stripped_state.dep_states = [\n strip_state(dep_state) for dep_state in original_state.dep_states\n ]\n\n # We also strip and include any followup states.\n stripped_state.followup_states = [\n strip_state(followup_state)\n for followup_state in original_state.followup_states\n ]\n\n return stripped_state\n\n return [strip_state(state) for state in states]", "def deassert_state(self, state):\n return DeassertStateVariable(self, state)", "def __getstate__(self):\n state = self.__dict__.copy()\n self.__cleanState__(state)\n return state", "def state_raw(self):\n return self._state_raw", "def __cleanState__(self, stateDict):\n for k in list(stateDict.keys()):\n if k.startswith('_'):\n stateDict.pop(k)\n return stateDict", "def state_to_task(self, states):\n tasks = states\n return tasks", "def get_task_state(self, task):\n return self._read_transaction(tx.get_task_state, task=task)", "def __getstate__(self):\n state = self.__dict__.copy()\n del state['_view']\n return state", "def prune_states(state: PushState) -> PushState:\n if state and not state[-1]:\n return PushGame.prune_states(state[:-1])\n else:\n return state", "def get_task_state(self, task):\n return self._gdb_interface.get_task_state(task)", "def get_state(self):\n return copy.deepcopy(self._state)", "def clone_full_state(self):\n state_ref = self.ale.cloneSystemState()\n state = self.ale.encodeState(state_ref)\n self.ale.deleteState(state_ref)\n return state", "def drop_suspended_state(self):\n\t\treturn Job(SDK.PrlVm_DropSuspendedState(self.handle)[0])", "def state_name(self):\n return task_states.to_str(self.state)", "def clone_state(self):\n state_ref = self.ale.cloneState()\n state = self.ale.encodeState(state_ref)\n self.ale.deleteState(state_ref)\n return state", "def __getstate__(self):\n state = self.__dict__\n state['_lock'] = None\n return state", "def __getstate__(self):\n import time\n\n state = self.__dict__.copy()\n\n # Remove the unpicklable entries\n del state['_model']\n del state['_input']\n del state['_output']\n del state['_preds_op']\n del state['_loss']\n del state['_loss_grads']\n del state['_preds']\n del state['_layer_names']\n\n model_name = str(time.time()) + '.h5'\n state['model_name'] = model_name\n self.save(model_name)\n return 
state", "def dump(self, state: bytes) -> uuid.UUID:\n LOGGER.debug('Dumping state (%d bytes)', len(state))\n return self._generation.lineage.dump(state)", "def __getstate__(self):\n state: Dict[str, Any] = deepcopy(self.__dict__)\n del state['__db']\n return state", "def __getstate__(self):\n try: \n state = self.__dict__.copy()\n del state['_Funcs']\n return state\n except: \n return self.__dict__", "def copy(self):\n state = State(self.state_object, self.compute_dag)\n state.stage_id_map = self.stage_id_map.copy()\n return state", "def silly(self) -> str:\n print(f\"Getting {self._name}'s State\")\n return self._state", "def reset_task_state(self) -> None:\n self.set_task_state(task_state=self.sample_task_state())", "def getIgnoreState(self):\n return self.__ignore_white", "def __getstate__(self):\n state = copy.copy(self.__dict__)\n state.pop('_JobHandler__queueLock')\n #XXX we probably need to record how this was init, and store that\n # such as the scheduler file\n if self._parallelLib == ParallelLibEnum.dask and '_server' in state:\n state.pop('_server')\n return state", "def state(self):\n return self._state.copy()", "def __getstate__ (self):\n state = self.__dict__.copy()\n restore = []\n for name in state.keys():\n if ((len( name ) > 1) and (name[-1] == '_') and\n (name[:-1] in state)):\n try:\n dumps( state[ name ] )\n except:\n del state[ name ]\n restore.append( name[:-1] )\n if len( restore ) > 0:\n state[ '__HasTraits_restore__' ] = restore\n\n return state", "def state_string(self):\n return AddressStates._to_string(self.state)", "def state_name(self):\n return TASK_STATE.get(self.state, 'UNKNOWN')", "def get_state_as_string(state):\n string_state = \"\"\n for element in state:\n string_state += \" \" + element\n return string_state.lstrip()" ]
[ "0.74558896", "0.59286696", "0.5859667", "0.5805591", "0.57871336", "0.5779528", "0.5778583", "0.5769516", "0.5739012", "0.57232314", "0.56408906", "0.5633798", "0.5573608", "0.55289465", "0.5504658", "0.5491149", "0.547416", "0.5440028", "0.5384897", "0.5383192", "0.5378312", "0.53782594", "0.5377563", "0.5369875", "0.5362726", "0.53429216", "0.53414124", "0.5329126", "0.5303151", "0.5301289" ]
0.7607578
0
View to return all information needed to display the cart, by converting what has been saved to the session into key variables. Protection in place in case a product, size or nic has been deleted while still in the cart, removing from the list before saving back to the cart session variable.
def cart_contents(request): cart_items = [] total = 0 savings = 0 product_count = 0 points_available = 0 points_earned = 0 discount_applied = request.session.get('discount_applied') cart = request.session.get('cart', {}) # Create a new dict so that items can be removed if needed new_dict = {k: v for k, v in cart.items()} for item, quantity in new_dict.items(): # Use string created in cart view to isolate model ids product_id = item.split("_")[0] size_id = item.split("_")[1] nic_id = item.split("_")[2] # Retrieve relevant objects for templating and remove if # no longer in database try: product = Product.objects.get(pk=product_id) except Product.DoesNotExist: del cart[item] messages.error(request, 'An item was removed from your cart as it is \ no longer available. Try to find a worthy replacement!') continue # Repeat for Size try: size = Size.objects.get(pk=size_id) except Size.DoesNotExist: del cart[item] messages.error(request, 'An item could not be added as its \ size is no longer available. \ Try to find a worthy replacement!') continue # Repeat for Nicotine try: nic = Nicotine.objects.get(pk=nic_id) except Nicotine.DoesNotExist: del cart[item] messages.error(request, 'An item could not be added as its \ nicotine options have changed. \ Try to find a worthy replacement!') continue # Check sale status and retrieve relevant price from Size model if product.on_sale: price = size.sale_price savings += (size.price - size.sale_price) * quantity else: price = size.price total += quantity * price product_count += quantity cart_items.append({ 'item_id': item, 'product': product, 'size': size, 'nic': nic, 'price': price, 'quantity': quantity, }) original_total = total request.session['cart'] = cart # Get user profile if request.user.is_authenticated: profile = get_object_or_404(UserProfile, user_id=request.user) else: profile = None # Check for available points if profile: points_available = profile.points # Check if user has chosen to redeem points and that the discount # will never take the total below zero if discount_applied: if total - Decimal(points_available / 100) <= 0: total = 0 else: total -= Decimal(points_available / 100) if total < settings.FREE_DELIVERY_THRESHOLD: delivery = Decimal(settings.STANDARD_DELIVERY) free_delivery_delta = settings.FREE_DELIVERY_THRESHOLD - total else: delivery = 0 free_delivery_delta = 0 grand_total = delivery + total points_earned = int(math.floor(total)) context = { 'cart_items': cart_items, 'total': total, 'original_total': original_total, 'savings': savings, 'product_count': product_count, 'delivery': delivery, 'free_delivery_delta': free_delivery_delta, 'free_delivery_threshold': settings.FREE_DELIVERY_THRESHOLD, 'grand_total': grand_total, 'points_available': points_available, 'discount_applied': discount_applied, 'points_earned': points_earned, } return context
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detail(request):\n # del request.session['cart_id']\n # del request.session['total_in_cart']\n data = {}\n if (cart_id := request.session.get('cart_id', None)):\n cart = Cart.objects.get(pk=cart_id)\n data['products_in_cart'] = cart.cartitems.all()\n data['total_price'] = cart.cart_price\n\n return render(request, 'cart/details.html', data)", "def cart_detail(request):\n cart = Cart(request)\n # Allow user to change the quantity from the details page.\n for item in cart:\n # Remember that a cart is stored as a dictionary in the user's session.\n # Here, we're adding a new key/value pair to the cart.\n # Create an instance of CartAddProductForm for each item in the cart to\n # allow changing product quantities. Initialize the form with the current\n # item quantity and set the update field to True so that when we submit the\n # form to the cart_add view, the current quantity is replaced with the new\n # one.\n # I DON'T QUITE UNDERSTAND WHAT THIS CODE IS DOING.\n item['update_quantity_form'] = CartAddProductForm(\n initial={'quantity': item['quantity'],\n 'update': True})\n coupon_apply_form = CouponApplyForm()\n return render(request, 'cart/detail.html', {'cart': cart, 'coupon_apply_form': coupon_apply_form})", "def view_cart(request):\n categories = all_categories()\n productTypes = all_productTypes()\n return render(request, \"cart.html\", {\"categories\": categories,\n \"productTypes\": productTypes})", "def cart_contents(request):\n cart = request.session.get('cart', {})\n\n cart_items = []\n total_cart = 0\n item_count = 0\n partial_value = []\n\n for item in cart:\n if item == 'car':\n id = cart['car']['item_id']\n quantity = cart['car']['quantity']\n instance = Car\n item_type = 'car'\n elif item == 'track_day':\n id = cart['track_day']['item_id']\n quantity = cart['track_day']['quantity']\n instance = TrackDayAddon\n item_type = 'track_day'\n elif item == 'insurance':\n id = cart['insurance']['item_id']\n quantity = cart['insurance']['quantity']\n instance = InsuranceAddon\n item_type = 'insurance'\n elif item == 'private_driver':\n id = cart['private_driver']['item_id']\n quantity = cart['private_driver']['quantity']\n instance = PrivateDriverAddon\n item_type = 'private_driver'\n\n item = get_object_or_404(instance, pk=id)\n total_cart += quantity * item.price\n item_total = quantity * item.price\n item_count += 1\n\n partial_value.append({\n 'item': item,\n 'item_type': item_type,\n 'id': id,\n 'item_total': item_total\n })\n cart_items.append({\n 'item': item,\n 'item_type': item_type,\n 'id': id,\n 'quantity': quantity,\n })\n\n return {'cart_items': cart_items, 'partial_value': partial_value,\n 'total_cart': total_cart, 'item_count': item_count}", "def display(auth_context):\n\n cart = carts.get_cart(auth_context.get('uid'))\n for item in cart:\n product = product_catalog.get_product(item.item_id)\n item.info = product\n\n return render_template('cart.html',\n cart=cart,\n auth_context=auth_context,\n bucket=product_catalog.BUCKET)", "def view_cart(request):\n \n return render(request, \"cart.html\" )", "def cart(request):\n return {'cart': get_cart_from_request(request)}", "def get(self,request):\r\n try:\r\n if request.user.is_authenticated():\r\n cart = self.cart_obj.get_cart_by_user(request.user)\r\n else:\r\n cart = self.cart_obj.get_cart_by_id(request.session.get('cart_id',None))\r\n \r\n if not cart:\r\n self.context['no_items'] = True\r\n return render(request, 'cart.html', self.context)\r\n request.session['cart_id'] = cart.first().id\r\n cart_details_list 
=[]\r\n if cart:\r\n cart_details = self.cart_det_obj.get_cart_items(cart.first().id) \r\n \"\"\" \r\n :Note If face any issue with cart order by cartid and get the latest cartid.\r\n \"\"\"\r\n for cart in cart_details:\r\n product = Product.objects.filter(id=cart.product_id)\r\n cart_temp_dict = {}\r\n cart_temp_dict['product'] = product.first()\r\n cart_temp_dict['quantity'] = cart.quantity\r\n cart_temp_dict['price'] = product.first().price\r\n cart_temp_dict[cart.id] = cart.id\r\n cart_details_list.append(cart_temp_dict)\r\n \r\n self.context['cart_details'] = cart_details_list\r\n self.context['cart_count'] = cart_details.count()\r\n response = render(request, 'cart.html', self.context)\r\n return response\r\n except:\r\n print(\"500\")\r\n raise Exception", "def cart_detail(request):\n cart = Cart(request)\n return render(request, 'cart/cart.html', {'cart': cart})", "def cart_contents(request):\n\n price = 10\n total = 0\n tickets_count = 0\n\n cart = request.session.get('cart', {})\n\n cart_items = []\n upvote_list = []\n\n for id, quantity in cart.items():\n ticket = get_object_or_404(Ticket, pk=id)\n\n upvote_list.append(id)\n tickets_count += quantity # Items in cart\n total += quantity * price # Total to be paid\n\n cart_items.append({'id': id, 'quantity': quantity,\n 'ticket': ticket, 'price': price})\n\n return {'tickets_count': tickets_count,\n 'cart_items': cart_items,\n 'total': total,\n 'price': price,\n 'upvote_list': upvote_list}", "def cart_contents(request):\n\n cart = request.session.get('cart', {})\n cart_items = []\n upvote_list = []\n price = 10\n total = 0\n ticket_count = 0\n\n for id, quantity in cart.items():\n ticket = get_object_or_404(Issues, pk=id)\n upvote_list.append(id)\n ticket_count += quantity\n total += quantity * price\n cart_items.append({'id': id, 'quantity': quantity,\n 'ticket': ticket, 'price': price})\n\n return {'ticket_count': ticket_count,\n 'cart_items': cart_items,\n 'total': total,\n 'upvote_list': upvote_list}", "def view_cart(request):\n\n return render(request, 'cart/cart.html')", "def view_cart(request):\n\n return render(request, 'cart/cart.html')", "def view_cart(request):\n return render(request, \"cart.html\")", "def view_cart(request):\n return render(request, \"cart.html\")", "def view_cart(request):\n return render(request, \"cart.html\")", "def cart_detail(request, pk):\n data = request.data\n try:\n user = validations_utils.user_validation(pk) # Validates if user exists or not.\n token_user_id = validations_utils.user_token_validation(\n request.auth.user_id, pk) # Validates user's Token authentication.\n except ValidationException as e: # Generic exception\n return Response(e.errors, status=e.status)\n\n if request.method == 'GET':\n if Cart.objects.filter(user_id=user.id).exists(): # Checks if product_category exists with given id.\n cart_items = Cart.objects.filter(user_id=user.id)\n else:\n return Response(messages.EMPTY_CART, status=status.HTTP_404_NOT_FOUND)\n if cart_items:\n cart_serializer = CartSerializer(cart_items, many=True)\n cart_data = cart_serializer.data\n data = []\n for obj in cart_data:\n x = utils.get_item_id(obj)\n item = validations_utils.item_validation(int(x))\n obj['name'] = item.name\n data.append(obj)\n return Response(data, status=status.HTTP_200_OK)\n else:\n return Response(messages.EMPTY_CART, status=status.HTTP_204_NO_CONTENT)", "def cart_contents(request):\n cart = request.session.get('cart', {})\n cart_items = []\n\n total = 0\n feature_count = 0\n\n for id, quantity in cart.items():\n 
feature = get_object_or_404(Feature, pk=id)\n total += quantity * feature.vote_price\n feature_count += quantity\n cart_items.append({'id': id, 'quantity': quantity, 'feature': feature})\n return {\n 'cart_items': cart_items,\n 'total': total,\n 'feature_count': feature_count\n }", "def cart_detail(request):\n assert isinstance(request, HttpRequest)\n\n if request.method == \"POST\":\n cart_service.remove_from_cart(request)\n\n return render(\n request,\n 'cartapp/cart_detail.html',\n {\n 'title':'Cart Page',\n 'year':datetime.now().year,\n }\n )\n else:\n return render(\n request,\n 'cartapp/cart_detail.html',\n {\n 'title':'Cart Page',\n 'year':datetime.now().year,\n }\n )", "def index(request):\n\n \"\"\" Generate couns of some of the main objects\"\"\"\n num_products = Product.objects.all().count()\n num_instances = ProductInstance.objects.all().count()\n\n # Available items (Status = 'a')\n num_instances_available = ProductInstance.objects.filter(status__exact='a').count()\n\n # The 'all()' is implied for default.\n num_brands = Brand.objects.count()\n ###\n num_categories = Category.objects.count()\n\n ## cart stuff\n #intitialize cart = False\n #which is a hack to keep population of context dict from crashing :()\n cart = False\n testCart = False\n\n if request.user.is_authenticated:\n print(request.user.email)\n username = request.user.username\n print(username)\n id =(request.user.id)\n try:\n cart = Cart.objects.filter(cartOwner=request.user, status = 'b')[0]\n except:\n c = Cart(cartOwner=request.user, status='b', shoppingSince=timezone.now())\n c.save()\n if cart:\n if cart_is_empty(cart):\n cart=False\n \n #testCart\n user = request.user\n if TestCart.objects.filter(cartOwner=user, status='b').count() < 1:\n testCart = TestCart(cartOwner=user, status='b')\n testCart.save()\n testCart = TestCart.objects.filter(cartOwner=user, status='b')[0]\n print(testCart)\n if testCart.itemsInCart.count() < 1:\n testCart = False\n\n # number of visis to this view, as counted in he session variable\n num_visits = request.session.get('num_visits', 0)\n request.session['num_visits'] = num_visits + 1\n\n context = {\n 'num_products': num_products,\n 'num_instances': num_instances,\n 'num_instances_available': num_instances_available,\n 'num_brands': num_brands,\n 'num_categories': num_categories,\n 'num_visits': num_visits,\n 'cart': cart,\n 'testCart': testCart,\n }\n # Render the html template index.html with data in the context variable\n return render(request, 'index.html', context=context)", "def basket_contents(request):\n basket = request.session.get('basket', {})\n\n basket_items = []\n order_total = 0\n product_count = 0\n\n for id, quantity in basket.items():\n product = get_object_or_404(Products, pk=id)\n if product.sale_price:\n order_total += quantity * product.sale_price\n product_count += quantity\n request.session['product_count'] = product_count\n basket_items.append({\n 'id': id,\n 'quantity': quantity,\n 'product': product\n })\n else:\n order_total += quantity * product.price\n product_count += quantity\n request.session['product_count'] = product_count\n basket_items.append({\n 'id': id,\n 'quantity': quantity,\n 'product': product\n })\n\n if order_total < settings.MIN_DELIVERY_THRESHOLD:\n delivery_total = round(Decimal(settings.MIN_DELIVERY_CHARGE), 2)\n else:\n delivery_total = round(Decimal(settings.UPPER_DELIVERY_CHARGE), 2)\n\n grand_total = delivery_total + order_total\n\n context = {\n 'basket_items': basket_items,\n 'order_total': order_total,\n 'delivery_total': 
delivery_total,\n 'grand_total': grand_total,\n 'product_count': product_count,\n }\n return context", "def cart_detail(request):\r\n \r\n cart = Cart(request)\r\n\r\n for item in cart:\r\n item['update_quantity_form'] = CartAddProductForm(initial={'quantity': item['quantity'],\r\n 'override': True})\r\n # cartClass = CartAddProductForm_func(item['book'].inventory + item['quantity'])\r\n # item['update_quantity_form'] = cartClass(initial={'quantity': item['quantity'],\r\n # 'override': True})\r\n \r\n coupon_apply_form = CouponApplyForm()\r\n isvalid_discount = False\r\n \r\n a = 1\r\n if a == -1:\r\n isvalid_discount = True \r\n else :\r\n isvalid_discount = False \r\n\r\n return render(request, 'cart/cart_detail.html', {'cart': cart,'coupon_apply_form':coupon_apply_form, 'isvalid_discount':isvalid_discount})", "def menu(request):\n cart = cartData(request)\n cart_items = cart['cart_items']\n # order = cart['order']\n # items = cart['items']\n # Get all our object\n products = BobaProduct.objects.all()\n # Dictionary to hold our products\n context = {\"products\": products, \"cart_items\": cart_items}\n return render(request, 'store/menu.html', context)", "def cart_contents(request):\n cart = request.session.get('cart', {})\n\n cart_items = []\n total = 0\n child_count = 0\n \n for id, donation in cart.items():\n child = get_object_or_404(Child, pk=id)\n total += donation\n child_count += 1\n cart_items.append({'id': id, 'donation': donation, 'child': child})\n \n return {'cart_items': cart_items, 'total': total, 'child_count': child_count}", "def cart(request):\r\n\r\n # ---------------------------------------------------------------\r\n # Used to load user's cart\r\n # Order (get order ID where customer id is current user's customer ID) -> OrderProduct (for product IDs on open order) -> Product (get product data)\r\n sql = \"\"\"SELECT *, website_orderproduct.id as \"order_product_id\"\r\n FROM website_order\r\n JOIN website_orderproduct ON website_orderproduct.order_id = website_order.id\r\n JOIN website_product ON website_product.id = website_orderproduct.product_id\r\n WHERE customer_id = %s AND website_order.payment_type_id IS NULL\r\n \"\"\"\r\n\r\n # used to delete single join table\r\n sql_delete = \"\"\"DELETE FROM website_orderproduct\r\n WHERE order_id = %s AND id = %s\r\n \"\"\"\r\n\r\n # used to delete the user's open order\r\n sql_delete_open_order = \"\"\"DELETE FROM website_order\r\n WHERE website_order.id = %s AND website_order.payment_type_id IS NULL\r\n \"\"\"\r\n # ---------------------------------------------------------------\r\n\r\n customer_id = request.user.customer.id\r\n\r\n # A delete button was clicked - if it's the 'cancel order' button AND!!! the user provides confirmation, then delete all OrderProduct join tables and the open order. Otherwise, delete the specific product that was clicked.\r\n if request.method == \"POST\":\r\n\r\n try:\r\n cancel_order_confirmation = request.POST[\"confirmed_deletion\"] # if this is exists on POST, then the user has confirmed the order's deletion. 
if not -> except\r\n order_id = request.POST[\"order_id\"]\r\n products = Order.objects.raw(sql, [customer_id])\r\n\r\n for product in products:\r\n with connection.cursor() as cursor:\r\n cursor.execute(sql_delete, [order_id, product.order_product_id])\r\n\r\n with connection.cursor() as cursor:\r\n cursor.execute(sql_delete_open_order, [order_id])\r\n\r\n return HttpResponseRedirect(reverse(\"website:products\"))\r\n\r\n except:\r\n\r\n try:\r\n cancel_order = request.POST[\"empty_cart\"] # if this exists on POST, then the user clicked the cancel all button, so prompt for confirmation. if not -> except\r\n context = {\"order_id\": request.POST[\"order_id\"], \"delete_confirmation\": True}\r\n return render(request, \"cart.html\", context)\r\n\r\n except:\r\n # last valid option that would trigger a POST: a user clicked delete button on a specific product in their cart, so remove it\r\n order_product_id = request.POST[\"order_product_id\"]\r\n order_id = request.POST[\"order_id\"]\r\n with connection.cursor() as cursor:\r\n cursor.execute(sql_delete, [order_id, order_product_id])\r\n\r\n # check if there are remaining items in cart. If cart is empty, delete open order\r\n order = Order.objects.raw(sql, [customer_id])\r\n order_size = len(order)\r\n if order_size == 0:\r\n with connection.cursor() as cursor:\r\n cursor.execute(sql_delete_open_order, [order_id])\r\n\r\n # redirect user back to their cart\r\n return HttpResponseRedirect(reverse(\"website:cart\"))\r\n\r\n # load user's cart when clicking the link in the navbar.\r\n try:\r\n if request.method == \"GET\":\r\n # get user's open order information. If there's no open order, then the context is effectively empty, and the except clause takes effect. If an order table is returned (i.e. the order variable), then it has one row per product\r\n order = Order.objects.raw(sql, [customer_id])\r\n\r\n # get products from queryset (effectively the same rows as the order variable already has) to provide the template with a more obvious context variable\r\n products = list()\r\n for product in order:\r\n products.append(product)\r\n\r\n # calculate total cost of products in open order\r\n total = 0\r\n for product in order:\r\n total += product.price\r\n\r\n context = {\"order_id\": order[0].id, \"order\": order, \"products\": products, \"total\": total}\r\n return render(request, \"cart.html\", context)\r\n except:\r\n context = {}\r\n return render(request, \"cart.html\", context)", "def shopping_cart(request, movie_id=None):\n cart = request.session.get('cart', [])\n tickets = Tickets.objects.filter(id__in=cart)\n context = {\n 'tickets': tickets,\n 'cart': cart\n }\n\n return render(request, 'shopping_cart.html', context)", "def cart_contents(request):\n cart = request.session.get('cart', {})\n \n cart_items = []\n total = 0\n feature_count = 0\n \n for id, quantity in cart.items():\n feature = get_object_or_404(FeatureTicket, pk=id)\n print(feature)\n total += quantity * feature.contribution\n feature_count += quantity\n cart_items.append({'id': id, 'quantity': quantity, 'feature': feature})\n \n return { 'cart_items': cart_items, 'total': total, 'feature_count': feature_count }", "def cart(request, pk, key):\n data = request.data\n try:\n user = validations_utils.user_validation(pk) # Validates if user exists or not.\n token_user_id = validations_utils.user_token_validation(\n request.auth.user_id, pk) # Validates user's Token authentication.\n item = validations_utils.item_validation(key)\n\n except ValidationException as e: # Generic 
exception\n return Response(e.errors, status=e.status)\n if request.method == 'POST':\n try:\n with transaction.atomic():\n try:\n data['user'] = user.id\n data['item'] = item.id\n data = utils.add_item_to_cart(data) # Creates user with request data.\n return Response(data, status=status.HTTP_201_CREATED)\n except ValidationException as e: # Generic exception\n return Response(e.errors, status=e.status)\n except IntegrityError:\n return Response(messages.ADD_ITEM_TO_CART_FAILED, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n elif request.method == 'DELETE':\n try:\n with transaction.atomic():\n try:\n if Cart.objects.filter(user_id=user.id).filter(item_id=item.id).exists():\n # Checks if product_category exists with given id.\n cart_item_obj = Cart.objects.filter(user_id=user.id).filter(item_id=item.id)\n else:\n return Response(messages.EMPTY_CART, status=status.HTTP_404_NOT_FOUND)\n if cart_item_obj:\n # cart_item = Cart.objects.get(pk=cart_item_obj.id)\n cart_item_obj.delete()\n return Response(messages.CART_ITEM_SUCCESSFULLY_DELETED, status=status.HTTP_200_OK)\n except ValidationException as e: # Generic exception\n return Response(e.errors, status=e.status)\n except IntegrityError:\n return Response(messages.DELETE_ITEM_TO_CART_FAILED, status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n elif request.method == 'PUT':\n try:\n with transaction.atomic():\n try:\n data['user'] = user.id\n data['item'] = item.id\n if Cart.objects.filter(user_id=user.id).filter(item_id=item.id).exists():\n # Checks if product_category exists with given id.\n cart_item_obj = Cart.objects.get(user_id=user.id, item_id=item.id)\n else:\n return Response(messages.EMPTY_CART, status=status.HTTP_404_NOT_FOUND)\n try:\n cart_item = validations_utils.cart_item_validation(cart_item_obj.id)\n except ValidationException as e: # Generic exception\n return Response(e.errors, status=e.status)\n updated_data = utils.update_cart_item(data, cart_item) # Updates cart data.\n return Response(updated_data, status=status.HTTP_200_OK)\n except ValidationException as e: # Generic exception\n return Response(e.errors, status=e.status)\n except IntegrityError:\n return Response(messages.UPDATE_ITEM_TO_CART_FAILED, status=status.HTTP_500_INTERNAL_SERVER_ERROR)", "def styleboard(request, cat_id=None):\n if cat_id:\n if not get_cat(cat_id):\n return redirect('styleboard')\n\n sessionid = request.session.get('cartsession',None)\n if not sessionid: \n session_id = generate_unique_id()\n request.session['cartsession'] = session_id\n\n info = {}\n\n idecorateSettings = IdecorateSettings.objects.get(pk=1)\n info['global_default_quantity'] = idecorateSettings.global_default_quantity\n info['global_guest_table'] = idecorateSettings.global_table \n\n info['mode'] = 'styleboard'\n search = request.POST.get('search',None)\n if search:\n info['keyword'] = search\n info['keyword_cat'] = 0\n search_result_cat = search_category(search)\n if search_result_cat:\n cat_id = search_result_cat.id\n info['keyword_cat'] = cat_id\n info['mode'] = 'search' \n info['category_count'] = 0\n else:\n categories = get_categories(cat_id)\n if categories.count() > 0:\n info['categories'] = categories\n\n info['category_count'] = categories.count()\n\n if not cat_id:\n cat_id = 0\n info['cat_id'] = cat_id\n\n product_positions = request.session.get('product_positions', None)\n\n if product_positions:\n info['product_positions'] = mark_safe(str(product_positions))\n #del request.session['product_positions']\n else:\n info['product_positions'] = mark_safe(\"''\")\n\n 
info['max_emb_size'] = settings.MAX_UPLOAD_EMBELLISHMENT_IMAGE_SIZE\n info['text_items'] = TextFonts.objects.filter(is_active=True, is_deleted=False)\n\n \"\"\"\n save styleboard personalize or modify\n \n try:\n del request.session['customer_styleboard']\n except:\n pass\n \n try:\n del request.session['cartsession']\n except:\n pass\n \"\"\"\n\n sms = st_man(request)\n\n if sms['sbid']:\n\n request.session['sbid'] = sms['sbid']\n\n info.update(sms)\n \n template_view = request.GET.get('template')\n\n if template_view :\n\n info['view_template'] = template_view\n\n return render_to_response('interface/styleboard2.html', info,RequestContext(request))", "def get_cart_items(request):\n return CartItem.objects.filter(cart_id = get_cart_id_session(request))" ]
[ "0.74897677", "0.70164984", "0.6997387", "0.68789303", "0.68103784", "0.6678317", "0.66267216", "0.6622569", "0.66087735", "0.6606675", "0.6523773", "0.6521631", "0.6521631", "0.647599", "0.647599", "0.647599", "0.64378947", "0.6432149", "0.63870263", "0.63262737", "0.63043016", "0.62684155", "0.6248039", "0.6240263", "0.6180363", "0.6169673", "0.61267954", "0.6047135", "0.6043138", "0.6036614" ]
0.76600933
0
Outputs magnetic field given lat, lon, alt.
def magnetic_field(date: datetime.datetime, lat, lon, alt, output_format='cartesian'): g = GeoMag() return g.GeoMag(np.array([lat, lon, alt]), date, location_format='geodetic', output_format=output_format)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def magnetization(h):\n if h.has_eh: raise\n if h.has_spin: \n mx = extract.mx(h.intra)\n my = extract.my(h.intra)\n mz = extract.mz(h.intra)\n else: raise\n np.savetxt(\"MAGNETIZATION_X.OUT\",np.matrix([h.geometry.x,h.geometry.y,mx]).T)\n np.savetxt(\"MAGNETIZATION_Y.OUT\",np.matrix([h.geometry.x,h.geometry.y,my]).T)\n np.savetxt(\"MAGNETIZATION_Z.OUT\",np.matrix([h.geometry.x,h.geometry.y,mz]).T)", "def compute_magnetic_field(self, coords, params={}, basis=\"rpz\"):", "def magnetic_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber*r\n ikr = 1j*kr\n\n front_term = self.moment / (4. * np.pi * r**3) * np.exp(-ikr)\n symmetric_term = (\n spatial.repeat_scalar(self.dot_orientation(dxyz)) * dxyz *\n (-kr**2 + 3*ikr + 3) / r**2\n )\n oriented_term = (\n (kr**2 - ikr - 1) *\n np.kron(self.orientation, np.ones((dxyz.shape[0], 1)))\n )\n\n return front_term * (symmetric_term + oriented_term)", "def magnetisation(field):\n norm_field = df.Field(field.mesh, dim=1, value=(field.norm.array != 0))\n volume = df.integral(norm_field * df.dV, direction='xyz')\n return df.integral(field * df.dV / volume, direction='xyz')", "def magnetic_field(self, xyz):\n dxyz = self.vector_distance(xyz)\n r = spatial.repeat_scalar(self.distance(xyz))\n kr = self.wavenumber * r\n ikr = 1j*kr\n\n front_term = (\n self.current * self.length / (4 * np.pi * r**2) * (ikr + 1) *\n np.exp(-ikr)\n )\n return -front_term * self.cross_orientation(dxyz) / r", "def magnetometer(self):\n self._mag[X] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_X_H_M) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_X_L_M), 16)\n self._mag[Y] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_Y_H_M) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_Y_L_M), 16)\n self._mag[Z] = twos_comp(self.i2c_bus.read_byte_data(self.addr, OUT_Z_H_M) << 8 | \n self.i2c_bus.read_byte_data(self.addr, OUT_Z_L_M), 16)\n\n return vector(self._mag)", "def write_dftb_in(self, outfile):\n\n outfile.write('Geometry = GenFormat { \\n')\n outfile.write(' <<< \"geo_end.gen\" \\n')\n outfile.write('} \\n')\n outfile.write(' \\n')\n\n params = self.parameters.copy()\n\n s = 'Hamiltonian_MaxAngularMomentum_'\n for key in params:\n if key.startswith(s) and len(key) > len(s):\n break\n # --------MAIN KEYWORDS-------\n previous_key = 'dummy_'\n myspace = ' '\n for key, value in sorted(params.items()):\n current_depth = key.rstrip('_').count('_')\n previous_depth = previous_key.rstrip('_').count('_')\n for my_backsclash in reversed(\n range(previous_depth - current_depth)):\n outfile.write(3 * (1 + my_backsclash) * myspace + '} \\n')\n outfile.write(3 * current_depth * myspace)\n if key.endswith('_') and len(value) > 0:\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' = ' + str(value) + '{ \\n')\n elif (key.endswith('_') and (len(value) == 0)\n and current_depth == 0): # E.g. 'Options {'\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' ' + str(value) + '{ \\n')\n elif (key.endswith('_') and (len(value) == 0)\n and current_depth > 0): # E.g. 'Hamiltonian_Max... 
= {'\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' = ' + str(value) + '{ \\n')\n elif key.count('_empty') == 1:\n outfile.write(str(value) + ' \\n')\n else:\n outfile.write(key.rsplit('_')[-1] + ' = ' + str(value) + ' \\n')\n previous_key = key\n current_depth = key.rstrip('_').count('_')\n for my_backsclash in reversed(range(current_depth)):\n outfile.write(3 * my_backsclash * myspace + '} \\n')\n outfile.write('ParserOptions { \\n')\n outfile.write(' IgnoreUnprocessedNodes = Yes \\n')\n outfile.write('} \\n')", "def add_altitude():\n\n doc = Metashape.app.document\n if not len(doc.chunks):\n raise Exception(\"No chunks!\")\n\n # alt = Metashape.app.getFloat(\"Please specify the height to be added:\", 100)\n alt = float(sys.argv[1])\n\n\n chunk = doc.chunk\n\n for camera in chunk.cameras:\n if camera.reference.location:\n coord = camera.reference.location\n camera.reference.location = Metashape.Vector([coord.x, coord.y, coord.z + alt])\n print(\"Add : \"+str(sys.argv[1]))", "def sunpos_mag(t,lat,lon,elev,gm,temp=None,press=None,radians=True):\n #az_zen is a (...,5) dimension ndarray\n az_zen = sunpos(t,lat,lon,elev,temp,press,radians=radians)\n decl = declination(lat,lon,elev,t,gm,radians)\n az_zen[...,0] -= decl\n #subtract declination to go from true N to magnetic N\n return az_zen", "def __call__(self, coords, params={}, basis=\"rpz\"):\n return self.compute_magnetic_field(coords, params, basis)", "def calcMagneticFieldMap(self):\n # Normalised b-field (note lower case)\n self.solenoid.calcMagneticFieldMap()\n self.b = lambda z: self.solenoid.B_interp(z) * -e / (2 * m * c)\n self.calc_level = CALC_B_MAP", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n return self._scalar * self._field.compute_magnetic_field(coords, params, basis)", "def phot_logg(Teff,mag0,BCmag,distmod,Mstar=0.75):\n return 4.44 + np.log10(Mstar) + 4*np.log10(Teff/5780) + 0.4 * (mag0 - distmod + BCmag - 4.75)", "def write_dftb_in(self, filename):\n\n outfile = open(filename, 'w')\n outfile.write('Geometry = GenFormat { \\n')\n #outfile.write(' <<< \"geo_end.gen\" \\n')\n outfile.write(' <<< %s \\n' %self.geo_fname)\n outfile.write('} \\n')\n outfile.write(' \\n')\n\n params = self.parameters.copy()\n\n s = 'Hamiltonian_MaxAngularMomentum_'\n for key in params:\n if key.startswith(s) and len(key) > len(s):\n break\n else:\n # User didn't specify max angular mometa. Get them from\n # the .skf files:\n symbols = set(self.atoms.get_chemical_symbols())\n for symbol in symbols:\n path = os.path.join(self.slako_dir,\n '{0}-{0}.skf'.format(symbol))\n l = read_max_angular_momentum(path)\n params[s + symbol] = '\"{}\"'.format('spdf'[l])\n\n # --------MAIN KEYWORDS-------\n previous_key = 'dummy_'\n myspace = ' '\n for key, value in sorted(params.items()):\n current_depth = key.rstrip('_').count('_')\n previous_depth = previous_key.rstrip('_').count('_')\n for my_backsclash in reversed(\n range(previous_depth - current_depth)):\n outfile.write(3 * (1 + my_backsclash) * myspace + '} \\n')\n outfile.write(3 * current_depth * myspace)\n if key.endswith('_') and len(value) > 0:\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' = ' + str(value) + '{ \\n')\n elif (key.endswith('_') and (len(value) == 0) \n and current_depth == 0): # E.g. 'Options {'\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' ' + str(value) + '{ \\n')\n elif (key.endswith('_') and (len(value) == 0) \n and current_depth > 0): # E.g. 'Hamiltonian_Max... 
= {'\n outfile.write(key.rstrip('_').rsplit('_')[-1] +\n ' = ' + str(value) + '{ \\n')\n elif key.count('_empty') == 1:\n outfile.write(str(value) + ' \\n')\n elif ((key == 'Hamiltonian_ReadInitialCharges') and \n (str(value).upper() == 'YES')):\n f1 = os.path.isfile(self.directory + os.sep + 'charges.dat')\n f2 = os.path.isfile(self.directory + os.sep + 'charges.bin')\n if not (f1 or f2):\n print('charges.dat or .bin not found, switching off guess')\n value = 'No'\n outfile.write(key.rsplit('_')[-1] + ' = ' + str(value) + ' \\n')\n else:\n outfile.write(key.rsplit('_')[-1] + ' = ' + str(value) + ' \\n')\n # point\n if self.pcpot is not None and ('DFTB' in str(value)):\n outfile.write(' ElectricField = { \\n')\n outfile.write(' PointCharges = { \\n')\n outfile.write(\n ' CoordsAndCharges [Angstrom] = DirectRead { \\n')\n outfile.write(' Records = ' +\n str(len(self.pcpot.mmcharges)) + ' \\n')\n outfile.write(\n ' File = \"dftb_external_charges.dat\" \\n')\n outfile.write(' } \\n')\n outfile.write(' } \\n')\n outfile.write(' } \\n')\n previous_key = key\n\n current_depth = key.rstrip('_').count('_')\n for my_backsclash in reversed(range(current_depth)):\n outfile.write(3 * my_backsclash * myspace + '} \\n')\n #outfile.write('ParserOptions { \\n')\n #outfile.write(' IgnoreUnprocessedNodes = Yes \\n')\n #outfile.write('} \\n')\n #if self.do_forces:\n # outfile.write('Analysis { \\n')\n # outfile.write(' CalculateForces = Yes \\n')\n # outfile.write('} \\n')\n\n outfile.close()", "def pm_gal2eq(self, long_in=\"ra\", lat_in=\"dec\", pm_long=\"pm_l\", pm_lat=\"pm_b\", pm_long_out=\"pm_ra\", pm_lat_out=\"pm_dec\",\n name_prefix=\"__proper_motion_gal2eq\",\n right_ascension_galactic_pole=192.85,\n declination_galactic_pole=27.12,\n propagate_uncertainties=False,\n radians=False,\n inplace=False):\n kwargs = dict(**locals())\n kwargs.pop('self')\n kwargs['inverse'] = True\n return self.pm_eq2gal(**kwargs)", "def updateNameAndDescription(self, name, desc):\n self.magneticfield.name = name\n self.magneticfield.description = desc\n\n self.magneticfield.writeFile()", "def getMagneticFieldMap(self):\n return self.solenoid.B_interp(self.z_array)", "def lat_lon_to_nasadem_tile(lat,lon,current_dem_list=None):\n\n # A tile name looks like:\n #\n # NASADEM_NUMNC_n00e016.nc\n #\n # The translation from lat/lon to that string is represented nicely at:\n #\n # https://dwtkns.com/srtm30m/\n\n # Force download of the file list\n nasadem_file_list = get_nasadem_file_list(current_dem_list)\n\n ns_token = 'n' if lat >=0 else 's'\n ew_token = 'e' if lon >=0 else 'w'\n\n lat_index = abs(math.floor(lat))\n lon_index = abs(math.floor(lon))\n\n lat_string = ns_token + '{:02d}'.format(lat_index)\n lon_string = ew_token + '{:03d}'.format(lon_index)\n\n filename = nasadem_file_prefix + lat_string + lon_string + \\\n nasadem_content_extension\n\n if filename not in nasadem_file_list:\n print('Lat/lon {},{} not available'.format(lat,lon))\n filename = None\n\n return filename", "def magnetometer(self):\n self.com.reset_input_buffer()\n self.com.write(self.HEADER + self.MAG + self.END)\n header = self.com.read(1)\n if header != self.HEADER:\n print \"Got bad header from Arduino\"\n raise ArduinoError()\n data = ''\n while len(data) < 15:\n read_data = self.com.read(1)\n if len(read_data) != 1:\n print \"Error reading from Arduino\"\n raise ArduinoError()\n data += read_data\n if read_data == self.END:\n break\n print \"Arduino mag data:\", data\n mag_x = int(data[:data.index(',')])\n mag_y = int(data[data.index(',') + 1:-1])\n 
return mag_x, mag_y", "def add_mag(self, ra, dec, mag, mag_err, filt, mjd):\n \n pt = Table(names=self.__mag_colnames, \n data=[[ra],[dec],[mag],[mag_err],[filt],[mjd]])\n \n LightCurve.add_tables(self, pt)", "def compute_magnetic_field(self, coords, params=None, basis=\"rpz\"):\n assert basis.lower() in [\"rpz\", \"xyz\"]\n if isinstance(coords, Grid):\n coords = coords.nodes\n coords = jnp.atleast_2d(coords)\n if basis == \"xyz\":\n coords = xyz2rpz(coords)\n\n R, phi, Z = coords.T\n r = jnp.sqrt((R - self._R0) ** 2 + Z**2)\n theta = jnp.arctan2(Z, R - self._R0)\n br = -r * jnp.sin(theta)\n bp = jnp.zeros_like(br)\n bz = r * jnp.cos(theta)\n bmag = self._B0 * self._iota / self._R0\n B = bmag * jnp.array([br, bp, bz]).T\n if basis == \"xyz\":\n B = rpz2xyz_vec(B, phi=coords[:, 1])\n\n return B", "def get_mag(self):\n raise NotImplementedError", "def M(latitude):\n return a*(1.0-e2)/pow((1.0-e2)*pow(math.sin(latitude),2.0),3.0/2.0);", "def pm_eq2gal(self, long_in=\"ra\", lat_in=\"dec\", pm_long=\"pm_ra\", pm_lat=\"pm_dec\", pm_long_out=\"pm_l\", pm_lat_out=\"pm_b\",\n name_prefix=\"__proper_motion_eq2gal\",\n right_ascension_galactic_pole=192.85,\n declination_galactic_pole=27.12,\n propagate_uncertainties=False,\n radians=False, inverse=False,\n inplace=False):\n \"\"\"mu_gb = mu_dec*(cdec*sdp-sdec*cdp*COS(ras))/cgb $\n - mu_ra*cdp*SIN(ras)/cgb\"\"\"\n df = self.df if inplace else self.df.copy()\n long_in_original = long_in = df._expr(long_in)\n lat_in_original = lat_in = df._expr(lat_in)\n pm_long = df._expr(pm_long)\n pm_lat = df._expr(pm_lat)\n if not radians:\n long_in = long_in * np.pi/180\n lat_in = lat_in * np.pi/180\n c1_name = name_prefix + \"_C1\"\n c2_name = name_prefix + \"_C2\"\n right_ascension_galactic_pole = math.radians(right_ascension_galactic_pole)\n declination_galactic_pole = math.radians(declination_galactic_pole)\n df[c1_name] = c1 = np.sin(declination_galactic_pole) * np.cos(lat_in) - np.cos(declination_galactic_pole)*np.sin(lat_in)*np.cos(long_in-right_ascension_galactic_pole)\n df[c2_name] = c2 = np.cos(declination_galactic_pole) * np.sin(long_in - right_ascension_galactic_pole)\n c1 = df[c1_name]\n c2 = df[c2_name]\n if inverse:\n df[pm_long_out] = ( c1 * pm_long + -c2 * pm_lat)/np.sqrt(c1**2+c2**2)\n df[pm_lat_out] = ( c2 * pm_long + c1 * pm_lat)/np.sqrt(c1**2+c2**2)\n else:\n df[pm_long_out] = ( c1 * pm_long + c2 * pm_lat)/np.sqrt(c1**2+c2**2)\n df[pm_lat_out] = (-c2 * pm_long + c1 * pm_lat)/np.sqrt(c1**2+c2**2)\n if propagate_uncertainties:\n df.propagate_uncertainties([df[pm_long_out], df[pm_lat_out]])\n return df", "def _write_antti_location(lat, lon, rad, label, location_file):\n if location_file.split('.')[-1] == 'gz':\n ff = gzip.open(location_file, 'w')\n else:\n ff = open(location_file, 'w')\n\n ff.write(\"%% Geographic coordinates of the geoelectric field distribution \" +\n \" Data produced on %s\\n\"%(dt.datetime.utcnow()))\n ff.write(\"%% \\n\")\n ff.write(\"%% This data comes together with files DateTime.txt, B?.txt,\" +\n \" and Stations.txt. \\n\")\n ff.write(\"%% \\n\")\n ff.write(\"%% Contact: \\n\")\n ff.write(\"%% \\n\")\n ff.write(\"%% The format of the data is as follows:\\n\")\n ff.write(\"%% \\n\")\n ff.write(\"%% lat1 lon1 rad1 label1 \\n\")\n ff.write(\"%% lat2 lon2 rad2 label2 \\n\")\n ff.write(\"%% . . . \\n\")\n ff.write(\"%% . . . \\n\")\n ff.write(\"%% . . . 
\\n\")\n ff.write(\"%% \\n\")\n ff.write(\"\\n\")\n\n for l in range(len(lat)):\n ff.write(\"%02.2f %02.2f %08e %s\\n\"%(lat[l], lon[l], rad[l], label[l]))\n\n ff.close()", "def add_refmag(self, ra, dec, mag, filt, mjd, mag_err=None):\n\n rm = Table(names=self.__mag_colnames, \n data=[[ra],[dec],[mag],[mag_err],[filt],[mjd]]) \n \n self.__ref_mags.add_row(rm[0])", "def write_nmea(rmc_1, gga_1, gst_1, zda_1, gga_2, hdt, f1, f2, fh):\n f1.write(rmc_1)\n f1.write(gga_1)\n f1.write(gst_1)\n if zda_1:\n f1.write(zda_1)\n \n\n f2.write(gga_2)\n\n fh.write(hdt)", "def add_altitude(chunk, flightHeightFile): \n # Get the flight height\n try:\n # flightHeightFile = \"/SNOWDATA/SnowDrones-Processing/LDP/01-31-2020/RGB/100MEDIA/FlightHeight.txt\"\n with open(flightHeightFile , 'r') as myfile:\n data = myfile.read()\n alt = int(data)\n except:\n alt = int(55)\n\n # Update flight altitudes\n for camera in chunk.cameras:\n if camera.reference.location:\n coord = camera.reference.location\n camera.reference.location = PhotoScan.Vector([coord.x, coord.y, alt])", "def plot_phot_transform(params, inst_mag, cal_mag, bandpass):\n\n fig = plt.figure(2)\n\n plt.plot(cal_mag, inst_mag,'k.')\n\n plt.xlabel('Catalog magnitude')\n\n plt.ylabel('Instrumental magnitude')\n\n plt.title('Relation between instrumental and catalogue magnitudes in '+\\\n bandpass)\n\n [xmin,xmax,ymin,ymax] = plt.axis()\n\n plt.axis([xmax,xmin,ymax,ymin])\n\n plt.savefig(path.join(params['red_dir'],\n 'phot_transform_'+bandpass+'.eps'))\n\n plt.close(2)", "def modelmag(teff,band,distance=10,AV=0.0,RV=3):\n if band not in PASSBANDS:\n raise ValueError('%s is unrecognized bandpass.' % band)\n\n distance = atleast_1d(distance)\n AV = atleast_1d(AV)\n #AV = AV * distance/1000.\n \n\n if RV==5:\n A = AV*EXTINCTION5[band]\n else:\n A = AV*EXTINCTION[band]\n\n if size(distance) > 1 or size(AV) > 1:\n teff = atleast_1d(teff)\n dm = distancemodulus(distance)\n M = MAGFN[band](teff)\n D = dm[:,newaxis]\n A = A[:,newaxis,newaxis]\n #A = resize(A,(M.shape[1],M.shape[0])).T\n #A = A[:,newaxis]\n else:\n M = MAGFN[band](teff)\n D = distancemodulus(distance)\n\n \n res = M+D+A\n if size(res) == 1:\n return res[0]\n else:\n return res" ]
[ "0.6234416", "0.6036381", "0.56869453", "0.5671633", "0.5600399", "0.55292517", "0.55019677", "0.544296", "0.53621125", "0.5302858", "0.5279656", "0.51781017", "0.50963485", "0.5032904", "0.500878", "0.49855933", "0.49819586", "0.49788126", "0.49635312", "0.49299306", "0.4889536", "0.48763138", "0.48636457", "0.48633182", "0.48389623", "0.48374313", "0.48343936", "0.48318705", "0.48309672", "0.48239267" ]
0.79384327
0
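Illustrative only (not part of the dataset row above): a minimal call of the `magnetic_field` helper from the record's positive document, assuming the `GeoMag` class it wraps is importable from the same package the record implies. The coordinate values are made up.

import datetime

# Hypothetical inputs: geodetic latitude/longitude in degrees; the unit of `alt`
# depends on whatever the wrapped GeoMag implementation expects.
when = datetime.datetime(2020, 1, 1)
field = magnetic_field(when, lat=52.5, lon=13.4, alt=0.0)  # Cartesian field vector by default
print(field)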
Calculate a checksum for num using the Luhn algorithm.
def luhn_checksum(num: str) -> str: check = 0 for i, s in enumerate(reversed(num)): sx = int(s) if i % 2 == 0: sx *= 2 if sx > 9: sx -= 9 check += sx return str(check * 9 % 10)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def checksum(n):\n\n # Compute the sum of the non-check digits.\n s = sum(luhn_digits(n * 10))\n\n # Multiply by 9.\n result = s * 9\n\n # The units digit is the check digit\n check_digit = result % 10\n\n m = int(str(n) + str(check_digit))\n assert(verify(m))\n\n return check_digit", "def __calculate_checksum(cls, number) -> str:\n # TODO in future stages, this function will use the Luhn algorithm to create checksum\n return str(sum(int(num) for num in str(number)) % 10)", "def luhn_checksum(card_number):\n def _double_and_sum_digits(d):\n s = d * 2\n result = s if s < 10 else (s - 9)\n return result\n\n mapped_digits = [\n d if index % 2 == 0 else _double_and_sum_digits(d)\n for index, d\n in enumerate(reversed(digits_of(card_number)))\n ]\n\n checksum = sum(mapped_digits) % 10\n return checksum", "def luhn_checksum(card_number):\n \n # Convert number into a list so we can edit each index value\n num = [int(x) for x in str(card_number)]\n \n # Step 1: multiply each odd index by 2 \n for i in range(0, 15, 2): # len(num) was falling one short so resorted to using int\n num[i] *= 2\n \n # Step 2: subtract 9 from any numbers greater than 9\n for i in range(0, 15):\n if num[i] > 9:\n num[i] -= 9\n else:\n continue\n \n # Step 3: total the 15 digits \n total = 0\n for i in range(0, 15):\n total += num[i]\n \n # Step 4: multiply total by 9 and take the last digit which is our checksum\n total_2 = total * 9\n string_total_2 = str(total_2)\n checksum = string_total_2[-1]\n \n return checksum", "def luhn_algo(num):\n sum = 0\n num_string = str(num) # Converts num into string type\n # Starts with second to last digit\n # iterates by -2 until length of string is reached\n for i in range(-2, -len(num_string) - 1, -2):\n dig_product = int(num_string[i]) * 2\n if dig_product > 9: # If product is 2 digits, sum both individual digits\n sum += dig_product % 10\n sum += dig_product // 10 # int division to get first digit\n else:\n sum += dig_product % 10\n for i in range(-1, -len(num_string) - 1, -2):\n sum += int(num_string[i])\n return sum", "def create_checksum_luhn_algorithm(self, number):\n\n # Luhn Algorithm:\n\n # Create a list with the first 15 digits of card number\n list_15 = [int(x) for x in number]\n\n # Step - Multiply odd digits by 2\n list_15_double_odds = [el * 2 if (n + 1) % 2 != 0 else el for n, el in enumerate(list_15)]\n\n # Step - Subtract 9 to numbers over 9\n list_15_minus_9 = [el - 9 if el > 9 else el for el in list_15_double_odds]\n\n # Step - Add all numbers and infer the check digit\n modulo = sum(list_15_minus_9) % 10\n last_digit = 10 - modulo if modulo != 0 else modulo\n\n return str(last_digit)", "def checksum(number):\n return sum(i * int(n) for i, n in enumerate(reversed(number), 1)) % 11", "def luhn_sum(n):\n rest_of_num, last_num = split(n)\n\n if rest_of_num == 0:\n \treturn last_num\n else:\n \treturn last_num + luhn_double_sum(rest_of_num)", "def test_luhn_checksum(self):\n check_digit = calculate_luhn(\"7992739871\")\n assert check_digit == 3", "def luhn(card_number):\n card_number = str(card_number)\n sum = 0\n num_digits = len(card_number)\n odd_even = num_digits & 1\n\n for i in range(0, num_digits):\n digit = int(card_number[i])\n if not (( i & 1 ) ^ odd_even ):\n digit = digit * 2\n if digit > 9:\n digit = digit - 9\n sum = sum + digit\n\n return (sum % 10) == 0", "def calculate_luhn_check_digit(partial_card_number):\n checksum = luhn_checksum(int(partial_card_number) * 10)\n if checksum == 0:\n check_digit = 0\n else:\n check_digit = 10 - checksum\n return 
check_digit", "def test_valid_luhn(self):\n assert luhn_checksum(\"79927398713\") == 0", "def luhn_double_sum(n):\n rest_of_num, last_num = split(n)\n\n doubled_last_num = last_num*2\n if (doubled_last_num) >= 10:\n \tdoubled_last_num = sum_of_digits(doubled_last_num)\n if rest_of_num == 0:\n \treturn doubled_last_num\n return doubled_last_num + luhn_sum(rest_of_num)", "def calculate_checksum(code):\n\n sum_odd = reduce(sum_chars, code[::2])\n sum_even = reduce(sum_chars, code[1:-1:2])\n check = (sum_even + sum_odd * 3) % 10\n\n if check == 0:\n return 0\n else:\n return 10 - check", "def test_invalid_luhn(self):\n assert luhn_checksum(\"79927398714\") != 0", "def cardLuhnChecksumIsValid(card_number):\n sum = 0\n num_digits = len(card_number)\n oddeven = num_digits & 1\n for count in range(0, num_digits):\n digit = int(card_number[count])\n if not (( count & 1 ) ^ oddeven ):\n digit = digit * 2\n if digit > 9:\n digit = digit - 9\n sum = sum + digit\n return ( (sum % 10) == 0 )", "def checksum(number):\n c = 0\n for i, item in enumerate(reversed(str(number))):\n c = dTable[c][pTable[i % 8][int(item)]]\n return c", "def checksum (upc):\n\n # check type of input\n # raise TypeError if not string\n\n # xxxxxxxxxxx x\n # check length of string\n # raise ValueError if not 12\n\n # convert string to array\n # generate checksum using the first 11 digits provided\n # check against the the twelfth digit\n # result of first 11 digits must be consistent with the value of the 12th digit\n # value must be number\n\n # return True if they are equal, False otherwise\n num = []\n #\"123456\" --> \"1\" \"2\" \"3\" \"4\" \"5\" \"6\" --> num = [1,2,3,4,5,6] --> num[0] = 1, num[3] = 4\n if type(upc) is str:\n for i in range(0, len(upc)):\n try:\n num.append(int(upc[i]))\n except ValueError:\n raise ValueError(\"Not correct length\")\n # if upc[i] is not number checksum('1b2')\n else:\n raise TypeError(\"Invalid type passed as parameter\")\n #raiseError\n\n if len(num) != 12:\n raise ValueError(\"Not correct length\")\n\n\n odd, even = num[::2], num[1::2]\n result = 0\n for i in range(0,len(odd)):\n result = result + odd[i]\n\n result *= 3\n\n # This is to add even numbered digits\n for i in range(0, (len(even)-1)):\n result = result + even[i]\n\n result %= 10\n if result != 0:\n result = 10 - result\n\n if result == num[11]:\n return True\n\n return False", "def luhn_verifica(ccred):\n \n # Primeiro criamos uma nova cadeia, n, com os digitos do cartao de credito sem o de controle.\n # Usamos a funcao calc_soma para somar os digitos do cartao de acordo com o algoritmo de Luhn e juntamos o digito de controle. Caso este ultimo nao seja um algarismo, chamamos a atencao ao utilizador para o erro.\n # Caso o resto da divisao por 10 seja 0, a funcao devolve o valor logico True. 
\n \n\n n = ccred[:-1]\n dig_verificacao = ccred[-1]\n \n if '0' <= dig_verificacao <= '9':\n soma = calc_soma(n) + eval(dig_verificacao)\n \n else:\n raise ValueError ('function luhn_verifica() O string recebido apenas pode conter digitos') \n \n return soma % 10 == 0", "def _get_checksum(code: str) -> int:\r\n total = 0\r\n\r\n for index, digit in enumerate(code):\r\n digit = int(digit)\r\n if (index + 1) % 2 != 0:\r\n digit *= 2\r\n if digit > 9:\r\n digit -= 9\r\n total += digit\r\n\r\n checksum = 10 - total % 10\r\n\r\n return checksum if checksum != 10 else 0", "def checksum(n):\n return zlib.crc32(n.to_bytes(int(math.log2(n)), \"big\"))", "def calc_check_digit(number):\n number = compact(number)\n alphabet = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'\n weights = (29, 23, 19, 17, 13, 7, 5, 3)\n if not isdigits(number):\n number = number[0] + str('ABCEHKMOPT'.index(number[1])) + number[2:]\n c = sum(w * alphabet.index(n) for w, n in zip(weights, number)) % 11\n if c > 9:\n raise InvalidChecksum()\n return str(c)", "def create_checksum(self, fifteen_digit):\n duplicate_odd_digits = [int(fifteen_digit[i - 1]) * 2 if i % 2 else\n int(fifteen_digit[i - 1]) for i in range(1, 16)]\n subtract_nine = [digit - 9 if digit > 9 else digit for digit in duplicate_odd_digits]\n sum_up = sum(subtract_nine)\n return (10 - sum_up % 10) % 10", "def luhn_digits(n):\n\n digits = [int(i) for i in str(n)]\n\n # First, reverse the list of digits.\n digits.reverse()\n\n # Double the value of every second digit.\n digits = apply_to_odd_positions(double, digits)\n\n # If the result of this doubling operation is greater than 9 then\n # add the digits of the result.\n digits = apply_to_odd_positions(sum_digits, digits)\n\n return digits", "def generate_check_sum(account_id):\n card_number = str(Bank.iin) + str(account_id).zfill(9)\n step_one = []\n for i in range(len(card_number)):\n digit = int(card_number[i])\n if i % 2 == 0:\n digit *= 2\n if digit > 9:\n digit -= 9\n step_one.append(digit)\n step_two = sum(step_one)\n remainder = step_two % 10\n check_sum = (10 - remainder) if remainder else remainder\n return check_sum", "def verify(n):\n\n # Take the sum of all digits.\n sum_of_digits = sum(luhn_digits(n))\n\n # The number is valid iff the sum of digits modulo 10 is equal to 0\n return sum_of_digits % 10 == 0", "def ean_checksum(eancode):\n if len(eancode) <> 13:\n return -1\n oddsum=0\n evensum=0\n total=0\n eanvalue=eancode\n reversevalue = eanvalue[::-1]\n finalean=reversevalue[1:]\n\n for i in range(len(finalean)):\n if i % 2 == 0:\n oddsum += int(finalean[i])\n else:\n evensum += int(finalean[i])\n total=(oddsum * 3) + evensum\n\n check = int(10 - math.ceil(total % 10.0)) %10\n return check", "def checksum(code):\n return sum(code) % 256", "def checksum(upc):\n\n # check type of input\n if type(upc) != str:\n # raise TypeError if not string\n raise TypeError(\"Input must be a string\")\n # check length of string\n elif len(upc) != 12:\n # raise ValueError if not 12\n raise ValueError(\"Invalid UPC length\")\n # generate checksum using the first 11 digits provided\n else:\n # add the odd digits together\n odd_digits = upc[::2]\n odd_sum = sum([int(x) for x in odd_digits])\n\n # add the even digits together (12th digit not included)\n even_digits = upc[1:-1:2]\n even_sum = sum([int(x) for x in even_digits])\n\n # multiply the odd sum by 3, add that to the even sum and\n # find the modulo 10 of the result\n mod = ((odd_sum * 3) + even_sum) % 10\n\n # if the result is not 0, subtract the result from 10\n 
checksum_digit = 0\n if mod != 0:\n checksum_digit = 10 - mod\n\n # check against the twelfth digit\n # return True if they are equal, False otherwise\n return int(upc[11]) == checksum_digit", "def calculate_checksum(self, data):\n\t\tdata = data[2:] # Ignore start tokens ($$)\n\t\tcrc16 = crcmod.predefined.mkCrcFun('crc-ccitt-false')\n\t\treturn hex(crc16(data))[2:].upper().zfill(4)" ]
[ "0.8084448", "0.80643237", "0.78999513", "0.7894059", "0.7728619", "0.75503194", "0.7511611", "0.7499676", "0.74020904", "0.71649796", "0.7110696", "0.6942625", "0.6888765", "0.68604416", "0.6764696", "0.6605598", "0.65903616", "0.6547519", "0.64829504", "0.64828515", "0.64539576", "0.634994", "0.6316882", "0.620428", "0.61776173", "0.6171808", "0.6129179", "0.6102033", "0.6090637", "0.6067569" ]
0.84606624
0
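Illustrative only (not part of the dataset row above): appending a check digit with the `luhn_checksum` helper from the record's positive document. The partial number is the standard textbook example, not data taken from this dataset.

partial = "7992739871"          # digits without their check digit
digit = luhn_checksum(partial)  # doubles digits at even offsets from the right, folds >9, then *9 % 10
card = partial + digit
print(card)                     # 79927398713 -- the classic Luhn-valid example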
Romanize a given string.
def romanize(string: str, locale: t.Union[Locale, str]) -> str: locale = validate_locale(locale) if locale not in (Locale.RU, Locale.UK, Locale.KK): raise ValueError(f"Romanization is not available for: {locale}") table = _get_translation_table(locale) return string.translate(table)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fromRoman(s):\n pass", "def toRoman(n):\n pass", "def romanify(num):\n result = \"\"\n return result", "def fromRoman(s):\n if not s:\n raise InvalidRomanNumeralError, 'Input can not be blank'\n if not romanNumeralPattern.search(s):\n raise InvalidRomanNumeralError, 'Invalid Roman numeral: %s' % s\n\n result = 0\n index = 0\n for numeral, integer in romanNumeralMap:\n while s[index:index+len(numeral)] == numeral:\n result += integer\n index += len(numeral)\n return result", "def toRoman(n):\n if not isinstance(n, int):\n raise NorIntegerError(\"decimals can not be converted\")\n if not (0 < n < 5000):\n raise OutOfRangeError(\"number out of range (must be 1..4999)\")\n \n result = \"\"\n for numeral, integer in romanNumeralMap:\n while n >= integer:\n result += numeral\n n -= integer\n return result", "def from_roman(s: str) -> Integral:\n if not isinstance(s, str):\n raise TypeError(\"The argument to from_roman must be a string.\")\n if not _romanNumeralPattern.search(s):\n raise InvalidRomanNumeralError(f\"Invalid Roman numeral: {s}\")\n\n result = 0\n index = 0\n for numeral, integer in _romanNumeralMap:\n while s[index : index + len(numeral)] == numeral:\n result += integer\n index += len(numeral)\n return result", "def toRoman(n):\n result = \"\"\n for numeral, integer in romanNumeralMap:\n while n >= integer:\n result += numeral\n n -= integer\n return result", "def arabic_to_roman_replace(test_word):\n arabic_word = test_word\n\n \"\"\"\n testWord = \"دَرْس\"\n print(testWord, sep=\" \")\n \"\"\"\n\n # converting into roman characters\n print()\n roman_characters = \"\"\n for letter in arabic_word:\n #print(letter)\n if letter in dictionary:\n roman_characters += Dictionary.get_lebanese_to_roman_char(letter) #.roman_text #Dictionary.get_lebanese_to_roman_char(letter)\n elif letter in dictionary_vowels:\n roman_characters += dictionary_vowels[letter]\n # print(\"test\")\n elif letter == \" \":\n roman_characters += \" \"\n return roman_characters\n ####print(str(roman_characters), str(test_word))\n\n # مِلْء\n #####plans for improving the program#####", "def int_to_roman(i):\n result = []\n for integer, numeral in NUMERAL_MAP:\n count = i // integer\n result.append(numeral * count)\n i -= integer * count\n return ''.join(result)", "def to_roman(n: Union[Integral, np.integer]) -> str:\n if not isinstance(n, (Integral, np.integer)):\n raise TypeError(f\"{n} cannot be converted to a Roman numeral.\")\n if not (0 < n < 5000):\n raise OutOfRangeError(\"Number is out of range (need 0 < n < 5000)\")\n\n result = \"\"\n for numeral, integer in _romanNumeralMap:\n while n >= integer:\n result += numeral\n n -= integer\n return result", "def decToRoman(numStr):\n try:\n n = int(numStr)\n if n >= 4000:\n return 'Error!'\n romans = [\n (1000, 'M'), (900, 'CM'), (500, 'D'), (400, 'CD'),\n (100, 'C'), (90, 'XC'), (50, 'L'), (40, 'XL'),\n (10, 'X'), (9, 'IX'), (5, 'V'), (4, 'IV'),\n (1, 'I')\n ]\n result = ''\n for value, letters in romans:\n while n >= value:\n result += letters\n n -= value\n return result\n except:\n result = 'Error!'\n return result", "def int2roman(i, lower=False):\n warn('The function int2roman is deprecated from JAMS. 
Use module pyjams.',\n category=DeprecationWarning)\n assert i >= 1, 'integer must be > 0.'\n result = []\n for integer, numeral in numeral_map:\n count = int(i // integer)\n result.append(numeral * count)\n i -= integer * count\n if lower: result = [ i.lower() for i in result ]\n return ''.join(result)", "def intToRoman(self, num: int) -> str:\n\n # Last remainder\n remainder = num\n\n # Initial string\n roman = \"\"\n\n # Loops through all remainder values\n for v in self.values:\n division = remainder // v\n remainder = remainder % v\n\n # Adds to the string only if division is not empty.\n if division != 0:\n roman += self.symbols[v] * division\n\n return roman", "def romanify(num):\n result = \"\"\n onesDict = {1:\"I\", 2: \"II\", 3: \"III\", 4: \"IV\", 5: \"V\", 6: \"VI\", 7: \"VII\", 8: \"VIII\", 9: \"IX\", 0:\"\"}\n ones = num%10\n num-=num%10\n result = onesDict[ones] + result\n tensDict = {10:\"X\", 20: \"XX\", 30: \"XXX\", 40:\"XL\", 50:\"L\", 60:\"LX\", 70: \"LXX\", 80: \"LXXX\", 90: \"XC\", 0:\"\"}\n tens = num%100\n num-=num%100\n result = tensDict[tens] + result\n hunsDict = {100:\"C\", 200: \"CC\", 300: \"CCC\", 400:\"CD\", 500:\"D\", 600:\"DC\", 700: \"DCC\", 800: \"DCCC\", 900: \"CM\", 0:\"\"}\n huns = num%1000\n num-=num%1000\n result = hunsDict[huns] + result\n thous = num/1000\n result = \"M\"*thous + result\n \n return result", "def formatRomanNumeral(rn, key):\n # Something of \"I\" and \"I\" of something\n if rn == \"I/I\":\n rn = \"I\"\n return rn", "def toRoman(dec):\t\t\n if dec <=0:\n\t raise ValueError, \"It must be a positive\"\n # to avoid MMMM\n\telif dec>=4000: \n\t raise ValueError, \"It must be lower than MMMM(4000)\"\n \n\treturn decToRoman(dec,\"\",decimalDens,romanDens)", "def num2roman(num):\n roman = ''\n while num > 0:\n for i, r in ROMAN_MAP:\n while num >= i:\n roman += r\n num -= i\n return roman", "def roman_to_int(roman_string):\n\n NUMERALS_SET = set(list(zip(*NUMERAL_MAP))[1])\n roman_string = roman_string.upper()\n if len(set(list(roman_string.upper())) - NUMERALS_SET) != 0:\n raise ValueError('{0} does not seem to be a roman numeral'.format(\n roman_string))\n i = result = 0\n for integer, numeral in NUMERAL_MAP:\n while roman_string[i:i + len(numeral)] == numeral:\n result += integer\n i += len(numeral)\n if result < 1:\n raise ValueError('Can not interpret Roman Numeral {0}'.format(roman_string))\n return result", "def int_to_roman(number):\n result = []\n\n for integer, numeral in NUMERAL_MAP:\n count = number // integer\n result.append(numeral * count)\n number -= integer * count\n\n return ''.join(result)", "def romanize(digit, glyphs):\n if 1 <= digit <= 3:\n return digit*glyphs[0]\n elif digit == 4:\n return glyphs[0] + glyphs[1]\n elif digit >= 5 and digit <= 8:\n return glyphs[1] + ((digit - 5) * glyphs[0])\n elif digit == 9:\n return glyphs[0]+glyphs[2]\n else:\n return ''", "def int2roman(num):\n try:\n num_int = int(num)\n except ValueError:\n raise InputError(num, \"Input value must be in integer representation.\")\n except TypeError:\n raise InputError(num, \"Input must be a number, string, or a bytes-like object.\")\n if float(num) != float(num_int):\n raise InputError(num, \"Input cannot be a non-integer decimal value.\")\n else:\n num = int(num)\n if not 0 < num < 5000:\n raise InputError(num, \"Input must be an integer in [1,4999] range.\")\n\n res = \"\"\n for r, i in __extended_map:\n while num >= i:\n res += r\n num -= i\n return res", "def int_to_roman(input_rom): # Konverter v rimske številke. 
Nisem avtor te funkcije.\n ints = (1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1)\n nums = ('M', 'CM', 'D', 'CD','C', 'XC','L','XL','X','IX','V','IV','I')\n result = []\n for i in range(len(ints)):\n count = int(input_rom / ints[i])\n result.append(nums[i] * count)\n input_rom -= ints[i] * count\n return ''.join(result)", "def _int_to_roman(self, i):\n numeral_map = zip((1000, 900, 500, 400, 100, 90, 50, 40, 10, 9, 5, 4, 1),\n ('M', 'CM', 'D', 'CD', 'C', 'XC', 'L', 'XL', 'X', 'IX', 'V', 'IV', 'I'))\n result = []\n for integer, numeral in numeral_map:\n count = int(i / integer)\n result.append(numeral * count)\n i -= integer * count\n return ''.join(result)", "def to_roman(n):\n if not isinstance(n, int):\n try:\n n = int(n)\n except ValueError:\n raise NotIntegerError(\"non-integers cannot be converted\")\n\n if not (0 < n < 4000):\n raise OutOfRangeError(\"number out of range (must be 1..3999)\")\n\n result = \"\"\n for numeral, integer in ROMAN_NUMBER_MAP:\n while n >= integer:\n result += numeral\n n -= integer\n return result", "def roman2int(s):\n if not s or not isinstance(s, str):\n raise InputError(s, \"Input value must be a non-empty string.\")\n elif __roman_numeral_regex.search(s) is None:\n raise InputError(s, \"Input is not a valid Roman numeral representation of numbers in the 1-4999 range.\")\n\n return sum([__bmap[i] if __bmap[i] >= __bmap[j] else -__bmap[i] for i, j in zip(s, s[1:])]) + __bmap[s[-1]]", "def to_roman(an_arabic):\n result = \"\"\n\n for level, symbol in [(1000,\"M\"),\n (900,\"CM\"),\n (500,\"D\"),\n (400,\"CD\"),\n (100,\"C\"),\n (90,\"XC\"),\n (50,\"L\"),\n (40,\"XL\"),\n (10,\"X\"),\n (9,\"IX\"),\n (5,\"V\"),\n (4,\"IV\"),\n (1,\"I\")]:\n\n while an_arabic >= level:\n result += symbol\n an_arabic -= level\n \n return result", "def to_roman(numeral):\n mapping = {\n 'M': 1000,\n 'CM': 900,\n 'D': 500,\n 'CD': 400,\n 'C': 100,\n 'XC': 90,\n 'L': 50,\n 'XL': 40,\n 'X': 10,\n 'IX': 9,\n 'V': 5,\n 'IV': 4,\n 'I': 1\n }\n romans = {v:k for k,v in mapping.items()}\n result = ''\n\n for divisor, symbol in romans.items():\n count = numeral // divisor\n remainder = numeral % divisor\n numeral = remainder\n result += symbol * count\n\n return result", "def convert_numerals(input_str):\n # credit to: http://code.activestate.com/recipes/81611-roman-numerals/\n copy = input_str[:]\n copy = copy.split(\" \")\n\n nums = ['m', 'd', 'c', 'l', 'x', 'v', 'i']\n ints = [1000, 500, 100, 50, 10, 5, 1]\n places = []\n\n for i in range(len(copy)):\n is_valid = True\n\n if \".\" in copy[i]:\n copy[i] = copy[i].replace(\".\", \"\")\n else:\n # . 
must be appended to end of string to signify it is a roman\n # numeral\n is_valid = False\n\n if \"xix\" in copy[i] or \"xviii\" in copy[i]:\n is_valid = True\n\n for c in copy[i].lower():\n if c not in nums:\n # return original\n is_valid = False\n\n if is_valid is False:\n continue\n\n for char_index in range(len(copy[i])):\n c = copy[i][char_index].lower()\n value = ints[nums.index(c)]\n # If the next place holds a larger number, this value is negative.\n try:\n nextvalue = ints[nums.index(copy[i][char_index + 1].lower())]\n if nextvalue > value:\n value *= -1\n except IndexError:\n # there is no next place.\n pass\n places.append(value)\n\n out = 0\n\n for n in places:\n out += n\n\n copy[i] = str(out)\n\n return \" \".join(copy)", "def _RomanToLatin(self, roman_numerals):\n roman = roman_numerals.strip().upper()\n return {'I': '1', 'II': '2', 'III': '3', 'IV': '4', 'V': '5'}[roman]", "def roman_to_int(self, s):\r\n if not s:\r\n return 0\r\n\r\n # Create hash table for Roman numerals\r\n d = self.make_reference()\r\n\r\n p = \"\"\r\n x = 0\r\n for c in s.upper():\r\n # Evaluate M (1000)\r\n if c == \"M\":\r\n if p == \"C\":\r\n p = \"CM\"\r\n else:\r\n p = \"M\"\r\n # Evaluate D (500)\r\n elif c == \"D\":\r\n if p == \"C\":\r\n p = \"CD\"\r\n else:\r\n p = \"D\"\r\n # Evaluate C (100)\r\n elif c == \"C\":\r\n if p == \"X\":\r\n p = \"XC\"\r\n else:\r\n p = \"C\"\r\n # Evaluate L (50)\r\n elif c == \"L\":\r\n if p == \"X\":\r\n p = \"XL\"\r\n else:\r\n p = \"L\"\r\n # Evaluate X (10)\r\n elif c == \"X\":\r\n if p == \"I\":\r\n p = \"IX\"\r\n else:\r\n p = \"X\"\r\n # Evaluate V (5)\r\n elif c == \"V\":\r\n if p == \"I\":\r\n p = \"IV\"\r\n else:\r\n p = \"V\"\r\n # Evaluate I (1)\r\n else:\r\n p = \"I\"\r\n \r\n x += d[p]\r\n\r\n return x" ]
[ "0.7063574", "0.689551", "0.6734362", "0.64427215", "0.63702637", "0.6330791", "0.6323522", "0.6299757", "0.6215566", "0.6198708", "0.61629105", "0.61244214", "0.6120783", "0.61043954", "0.60932904", "0.60877186", "0.6060893", "0.5971338", "0.5948232", "0.5926629", "0.5908616", "0.58727574", "0.58605164", "0.58496267", "0.5824362", "0.57582414", "0.5674157", "0.56615543", "0.5657796", "0.5635324" ]
0.7607582
0
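Illustrative only (not part of the dataset row above): a sketch of calling the `romanize` transliteration helper from the record's positive document. `Locale`, `validate_locale`, and `_get_translation_table` are assumed to come from the same package the record belongs to, and the exact Latin output depends on that package's translation table.

# Hypothetical call: Cyrillic input transliterated with the Russian table.
latin = romanize("Привет", locale="ru")
print(latin)  # something like "Privet", per the package's mapping

# Locales other than RU/UK/KK are rejected by the guard inside the function,
# assuming validate_locale accepts "en" as a locale string.
try:
    romanize("Привет", locale="en")
except ValueError as err:
    print(err)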
Configure the logging system. If a logpath is provided, entries will also be written to that logfile.
def configure_logger(logpath, loglevel=logging.DEBUG): handlers = [logging.StreamHandler()] if logpath: handlers.append(logging.FileHandler(logpath)) logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%d-%m-%y %H:%M:%S', level=loglevel, handlers=handlers)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _configure_logging(self):\n self.log_level = Scaffold.LOG_LEVEL_MAP.get(self.log_level, ERROR)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n # assign the windmill instance logger\n #logging.basicConfig()\n self.log = logging.getLogger(self.name)\n self.log.setLevel(self.log_level)\n\n if self.log_path:\n file_path = None\n if self.log_path.endswith('.log'):\n file_path = self.log_path\n else:\n file_path = os.path.join(self.log_path, self.name + '.log')\n assert file_path\n file_handler = logging.FileHandler(file_path)\n file_handler.setLevel(self.log_level)\n file_handler.setFormatter(formatter)\n self.log.addHandler(file_handler)\n\n # if we are in verbose mode, then we send log output to console\n if self.verbose:\n # add the console logger for verbose mode\n console_handler = logging.StreamHandler()\n console_handler.setLevel(self.log_level)\n console_handler.setFormatter(formatter)\n self.log.addHandler(console_handler)\n\n self.log.info('Logging configured for: %s', self.name)", "def set_logger(log_path):\r\n logger = logging.getLogger()\r\n logger.setLevel(logging.INFO)\r\n\r\n if not logger.handlers:\r\n # Logging to a file\r\n file_handler = logging.FileHandler(log_path)\r\n file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))\r\n logger.addHandler(file_handler)\r\n\r\n # Logging to console\r\n stream_handler = logging.StreamHandler()\r\n stream_handler.setFormatter(logging.Formatter('%(message)s'))\r\n logger.addHandler(stream_handler)", "def set_logger(log_path):\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n if not logger.handlers:\n # Logging to a file\n file_handler = logging.FileHandler(log_path)\n file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))\n logger.addHandler(file_handler)\n\n # Logging to console\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter('%(message)s'))\n logger.addHandler(stream_handler)", "def set_logger(log_path):\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n if not logger.handlers:\n # Logging to a file\n file_handler = logging.FileHandler(log_path, mode='w')\n file_handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s: %(message)s'))\n logger.addHandler(file_handler)\n\n # Logging to console\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter('%(message)s'))\n logger.addHandler(stream_handler)", "def _configure_logging(self):\n pass", "def _configure_logging(self, path, level):\n logging_format = (\n \"%(asctime)s : %(levelname)s : %(module)s.%(lineno)s : %(message)s\"\n )\n date_format = \"%Y/%m/%d %I:%M:%S %p\"\n\n log_formatter = logging.Formatter(logging_format, date_format)\n file_handler = logging.FileHandler(path, mode=\"w\", encoding=\"UTF-8\")\n file_handler.setFormatter(log_formatter)\n self.logger.addHandler(file_handler)\n self.logger.setLevel(self._logging_levels(level))", "def configure(base_path):\n\n log_path = os.path.join(\n base_path,\n 'logs',\n )\n current_time = datetime.datetime.now().strftime(\"%d.%m.%Y %H:%M:%S\")\n\n log_fmt = '%(asctime)s [%(threadName)-12.12s] [%(levelname)-3.4s] %(message)s'\n\n logging.basicConfig(\n level=logging.INFO,\n format=log_fmt,\n handlers=[\n TimedRotatingFileHandler(\n filename=f\"{log_path}/analysis-service.({current_time}).log\",\n encoding='utf-8',\n when=\"d\"\n ),\n logging.StreamHandler()\n ]\n )", "def configure_logging():\n 
dictConfig(DEFAULT_LOGGING)\n\n default_formatter = logging.Formatter(\n \"%(asctime)s [%(levelname)s] [PID:%(process)d TID:%(thread)d] [%(filename)s:%(lineno)s in `%(funcName)s`] %(message)s\",\n \"%Y-%m-%d %H:%M:%S\")\n\n # file_handler = logging.handlers.RotatingFileHandler(logfile_path, maxBytes=10485760,backupCount=300, encoding='utf-8')\n # file_handler.setLevel(logging.INFO)\n\n if len(logging.getLogger().handlers) > 0:\n for h in logging.getLogger().handlers:\n if isinstance(h, logging.StreamHandler):\n # Then we found a logger to the terminal\n h.setLevel(logging.DEBUG)\n h.setFormatter(default_formatter)\n\n else:\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(default_formatter)\n logging.root.addHandler(console_handler)\n\n\n logging.root.setLevel(logging.WARNING)", "def configure_logging(log_level=None, log_fpath=None):\r\n\r\n # disable logging\r\n if not log_level:\r\n logging.disable()\r\n return\r\n\r\n log_level = log_level.upper()\r\n root_logger = logging.getLogger()\r\n root_logger.setLevel(log_level)\r\n\r\n # create formatter for the logs\r\n formatter = logging.Formatter(\"%(asctime)s :: %(levelname)s :: %(name)s :: %(funcName)s() :: %(message)s\")\r\n\r\n # create console logging handler and set its formatting, add it to the root logger\r\n ch = logging.StreamHandler()\r\n ch.setLevel(log_level)\r\n ch.setFormatter(formatter)\r\n root_logger.addHandler(ch)\r\n\r\n # create file logging handler and set its formatting, add it to the root logger\r\n if log_fpath:\r\n fh = logging.FileHandler(log_fpath)\r\n fh.setLevel(log_level)\r\n fh.setFormatter(formatter)\r\n root_logger.addHandler(fh)\r\n\r\n # print first log\r\n if log_fpath is None:\r\n root_logger.info(\"First log: logging to console at %s level.\" % logging.getLevelName(root_logger.getEffectiveLevel()))\r\n else:\r\n root_logger.info(\"First log: logging to console and %s at %s level\" %(log_fpath, logging.getLevelName(root_logger.getEffectiveLevel())))", "def _configure_logger():\n try:\n log_dir = os.environ['AUTOMINE_LOG_DIR']\n log_name = _log_name()\n cfg_path = os.path.join(log_dir, 'logging_config.json')\n with open(cfg_path) as src:\n cfg = json.load(src)\n handlers = cfg.get('handlers')\n for handler in iter(handlers.values()):\n filename = handler.get('filename')\n if filename:\n filename = filename.replace('{{AUTOMINE_LOG_DIR}}',\n log_dir)\n filename = filename.replace('{{__name__}}', log_name)\n handler['filename'] = filename\n loggers = cfg.get('loggers')\n if '__name__' in loggers:\n loggers[log_name] = loggers.pop('__name__')\n\n # add logging to the console if env var is set\n log_to_console = 'AUTOMINE_LOG_TO_CONSOLE' in os.environ\n if log_to_console and 'console' in handlers:\n logger_handlers = loggers[log_name].get('handlers')\n if logger_handlers:\n logger_handlers.append('console')\n\n dictConfig(cfg)\n except Exception as err: # pylint: disable=broad-except\n logging.basicConfig()\n raise err", "def initialize_logging(self):\n logging_config_path = self.pyleus_config.get('logging_config_path')\n if logging_config_path:\n logging.config.fileConfig(logging_config_path)\n elif os.path.isfile(DEFAULT_LOGGING_CONFIG_PATH):\n logging.config.fileConfig(DEFAULT_LOGGING_CONFIG_PATH)", "def configure_logging():\n class TimeFormatter(logging.Formatter):\n def formatTime(self, record, datefmt=None):\n datefmt = datefmt or '%Y-%m-%d %H:%M:%S'\n return time.strftime(datefmt, time.localtime(record.created))\n\n class 
SeverityFilter(logging.Filter):\n def filter(self, record):\n record.severity = record.levelname[0]\n return True\n\n if not os.path.exists(LOG_DIR):\n os.mkdir(LOG_DIR)\n\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n log_file = logging.handlers.RotatingFileHandler(LOG_FILE, backupCount=100)\n log_file.addFilter(SeverityFilter())\n log_file.setFormatter(TimeFormatter('%(asctime)s %(severity)s: %(message)s'))\n logger.addHandler(log_file)\n\n # Log all uncaught exceptions.\n def log_exception(exception_type, value, stack_trace):\n logging.error(\n ''.join(traceback.format_exception(exception_type, value, stack_trace)),\n )\n sys.excepthook = log_exception\n\n # Rotate log files once on startup to get per-execution log files.\n if os.path.exists(LOG_FILE):\n log_file.doRollover()", "def setup_logging():\n lvl = os.getenv(\"LOG_LEVEL\")\n path = os.getenv(\"LOG_PATH\")\n\n logger = get_logger()\n logger.setLevel(lvl)\n\n filehandler = logging.FileHandler(path)\n filehandler.setLevel(lvl)\n filehandler.setFormatter(logging.Formatter(\n \"[%(asctime)s] %(levelname)s: %(message)s\",\n datefmt=\"%Y-%d-%m %H:%M:%S\"\n ))\n\n streamhandler = logging.StreamHandler()\n streamhandler.setLevel(lvl)\n streamhandler.setFormatter(logging.Formatter(\"%(message)s\"))\n\n logger.addHandler(filehandler)\n logger.addHandler(streamhandler)", "def _setup_logging(self):\n if self.app_config_has(\"logging\"):\n log_config = self.app_config()[\"logging\"]\n filename_list = [\n v['filename'] for k, v in\n _find_config_tree(log_config, \"filename\")\n ]\n # pre-create directory in advance for all loggers\n for file in filename_list:\n file_dir = os.path.dirname(file)\n if file_dir and not os.path.isdir(file_dir):\n os.makedirs(file_dir, exist_ok=True)\n dictConfig(log_config)\n else:\n log = getLogger()\n handler = StreamHandler()\n formatter = Formatter(\n \"%(asctime)s-%(threadName)s-%(name)s-%(levelname)s-%(message)s\"\n )\n handler.setFormatter(formatter)\n log.addHandler(handler)\n log.setLevel(DEBUG)\n msg = (\"Starting \" + os.path.basename(__name__) +\n \" version \" + __version__ + \" on \" +\n \"_\".join(uname()).replace(\" \", \"_\"))\n logger = getLogger(__name__)\n logger.debug(msg)", "def setPath(logPath):\n GlobalLogger.logger.close()\n GlobalLogger.logger = FileLogger(logPath)", "def start_logging(self):\n text = _DEFAULT_LOG_CONFIG\n path = self.bindings.get('LOG_CONFIG', None)\n if path:\n try:\n with open(path, 'r') as f:\n text = f.read()\n except Exception as ex:\n print 'ERROR reading LOGGING_CONFIG from {0}: {1}'.format(path, ex)\n raise\n config = ast.literal_eval(args_util.replace(text, self.bindings))\n logging.config.dictConfig(config)\n log_path = os.path.join(\n self.bindings['LOG_DIR'], self.bindings['LOG_FILEBASE'] + '.log')\n os.chmod(log_path, 0600)\n\n self.__journal = global_journal.get_global_journal()\n if self.__journal is None:\n # force start\n journal_path = os.path.join(\n self.bindings['LOG_DIR'],\n self.bindings['LOG_FILEBASE'] + '.journal')\n self.__journal = global_journal.new_global_journal_with_path(journal_path)", "def configure_logging(logdir=None):\n logconfig = LOGCONFIG_DICT.copy()\n if logdir:\n debugfile = os.path.join(logdir, DEBUGFILE)\n logconfig['handlers']['debugfile']['filename'] = debugfile\n errorfile = os.path.join(logdir, ERRORFILE)\n logconfig['handlers']['errorfile']['filename'] = errorfile\n\n logging.config.dictConfig(logconfig)", "def setup_logging( cfg ):\n global _LOGGING_FORMAT_, _DATE_FORMAT_\n format,date = 
_LOGGING_FORMAT_,_DATE_FORMAT_\n \n if not cfg.get('logging', True):\n logging.basicConfig(handler=logging.NullHandler)\n return\n \n #check passed in cfgs if formats changed\n if cfg.get('log_format', False):\n format = cfg.get('log_format')\n if cfg.get('log_date_format',False):\n date = cfg.get('log_date_format')\n \n if cfg.get('log_debug', False):\n logging.basicConfig(level=logging.DEBUG,\n format=format,\n datefmt=date,\n filename=cfg.get('log_path', 'errors.log'))\n console = logging.StreamHandler()\n console.setLevel(logging.DEBUG)\n logging.getLogger().addHandler(console)\n \n elif cfg.get('log_warnings', False):\n logging.basicConfig(level=logging.WARNING,\n format=format,\n datefmt=date,\n filename=cfg.get('log_path','errors.log'))\n \n else:# Errors are always logged. deal.\n logging.basicConfig(level=logging.ERROR,\n format=format,\n datefmt=date,\n filename=cfg.get('log_path','errors.log'))", "def setup_logging(\n module,\n default_level=logging.INFO,\n env_key='LOG_CFG',\n logpath=os.getcwd(),\n config_path=None\n):\n\n if not os.path.exists(os.path.dirname(logpath)):\n os.makedirs(os.path.dirname(logpath))\n timestamp = datetime.datetime.now().strftime(\"%Y-%m-%d_%H:%M\")\n fpath = os.path.join(logpath, module, timestamp)\n\n path = config_path if config_path is not None else os.getenv(env_key, None)\n if path is not None and os.path.exists(path):\n with open(path, 'rt') as f:\n config = yaml.safe_load(f.read())\n for h in config['handlers'].values():\n if h['class'] == 'logging.FileHandler':\n h['filename'] = os.path.join(logpath, module, timestamp, h['filename'])\n touch(h['filename'])\n for f in config['filters'].values():\n if '()' in f:\n f['()'] = globals()[f['()']]\n logging.config.dictConfig(config)\n else:\n lpath=os.path.join(logpath, timestamp)\n if not os.path.exists(lpath):\n os.makedirs(lpath)\n logging.basicConfig(level=default_level, filename=os.path.join(lpath,\"base.log\"))", "def setup_logging():\r\n import ConfigParser # change this to configparser for Python 3\r\n # import logging\r\n import logging.config\r\n global logger\r\n\r\n try:\r\n \tlogging.config.fileConfig(\"celog.conf\")\r\n except ConfigParser.NoSectionError: \r\n\t# if there is no configuration file setup a default configuration\r\n logging.basicConfig(filename='code_extract.log',level= _logging_level,\r\n\t\t\tformat='%(asctime)s %(levelname)s - %(message)s',\r\n\t\t\tdatefmt='%Y %b %d, %a %H:%M:%S'\r\n\t\t\t)\r\n \r\n logger = logging.getLogger('%s' % __name__)\r\n\r\n logger.debug('logger ready')", "def logger_settings(self):\n LOG_CONFIG['root']['handlers'].append(self.logmode)\n flask_log = logging.getLogger(DEFAULT_NAME_FLASK_LOGGER)\n flask_log.setLevel(logging.ERROR)\n dictConfig(LOG_CONFIG)\n self.logger = logging.getLogger()", "def __setup_logging(self):\n\n loglevel = logging.INFO\n if self.config[\"verbose\"]:\n loglevel = logging.DEBUG\n\n FORMAT = '[%(asctime)s %(filename)s:%(lineno)s %(levelname)s] %(message)s'\n if self.config[\"log\"]:\n logging.basicConfig(format=FORMAT, level=loglevel, filename=self.config[\"log\"])\n else:\n logging.basicConfig(format=FORMAT, level=loglevel)", "def set_config(self, file_path_name):\n level = logging.DEBUG\n format = '%(asctime)s %(levelname)-8s %(message)s' \n datefmt = '%a, %d %b %Y %H:%M:%S'\n filemode = 'a'\n \n\n logging.basicConfig(level = level,\n format = format,\n datefmt = datefmt,\n filename = file_path_name,\n filemode = filemode)", "def setup_logging(log_basedir=\"logs\"):\n BASEDIR = 
os.path.abspath(os.path.dirname(__file__))\n LOGDIR = os.path.join(BASEDIR,log_basedir)\n \n # Check if the logs directory exists and is writable\n if not os.path.isdir(LOGDIR):\n print('ERROR: Log directory {} does not exist.'.format(LOGDIR))\n sys.exit(1)\n if not os.access(LOGDIR, os.W_OK):\n print('ERROR: No permissions to write to log directory {}.'.format(LOGDIR))\n sys.exit(1)\n\n # Set the log message format\n fmt = '%(levelname)s - %(asctime)s.%(msecs).03d %(process)d [%(filename)s:%(lineno)d] %(message)s'\n datefmt = '%m%d %H:%M:%S'\n formatter = logging.Formatter(fmt, datefmt)\n\n # Log to console\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(formatter)\n\n root = logging.getLogger()\n root.setLevel(logging.DEBUG)\n root.addHandler(console_handler)\n\n # Log to file, use a rotating file\n file_name = os.path.join(LOGDIR, '{}.log'.format(\"flask_api_otrs\") )\n\n file_handler = logging.handlers.RotatingFileHandler(file_name, backupCount=7)\n file_handler.setFormatter(formatter)\n root.addHandler(file_handler)", "def configure_logger():\n logger = logging.getLogger()\n handler = logging.StreamHandler()\n formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)", "def setupLogging(self):\n\t\ttry:\n\t\t\tself.logger = logging.getLogger(__name__)\n\t\t\thandler = RotatingFileHandler(self.logFile, maxBytes=500000, backupCount=5)\n\t\t\tformat = \"%(asctime)s %(levelname)-8s %(message)s\"\n\t\t\thandler.setFormatter(logging.Formatter(format))\n\t\t\thandler.setLevel(logging.INFO)\n\t\t\tself.logger.addHandler(handler)\n\t\t\tself.logger.setLevel(logging.INFO)\n\t\texcept Exception as err:\n\t\t\terrorStr = 'Error initializing log file, ',err\n\t\t\tprint(errorStr)\n\t\t\texit(1)", "def logging_setup(args, log_dir):\n timestamp_file = datetime.now().strftime(\"%Y%m%d-%H.%M_rcf_abb.log\")\n log_file = Path(log_dir) / timestamp_file\n\n handlers = []\n\n if not args.skip_logfile:\n handlers.append(log.FileHandler(log_file, mode=\"a\"))\n if not args.quiet:\n handlers.append(log.StreamHandler(sys.stdout))\n\n log.basicConfig(\n level=log.DEBUG if args.debug else log.INFO,\n format=\"%(asctime)s:%(levelname)s:%(funcName)s:%(message)s\",\n handlers=handlers,\n )", "def logging_config(args):\n # Any handlers from a basicConfig, which we will reconfigure.\n for handler in logging.root.handlers:\n logging.root.removeHandler(handler)\n\n level = logging.INFO - 10 * args.verbose + 10 * args.quiet\n # The command-line logging level specifies what goes to stderr.\n root_handler = logging.StreamHandler(sys.stderr)\n fmt = \"%(levelname)s %(asctime)s %(pathname)s:%(lineno)d: %(message)s\"\n datefmt = \"%y-%m-%d %H:%M:%S\"\n root_handler.setFormatter(logging.Formatter(fmt, datefmt))\n root_handler.setLevel(level)\n logging.root.addHandler(root_handler)\n logging.root.setLevel(level)\n\n code_log = _logging_configure_root_log(args.root_dir / args.code_log, level)\n mvid = args.mvid if hasattr(args, \"mvid\") else \"mvid\"\n _logging_configure_mathlog(mvid, args.root_dir / args.epiviz_log)\n _logging_individual_modules(args.logmod, args.modlevel)\n if code_log: # Tell the math log people where the code log is located.\n logging.getLogger(\"cascade.math\").info(f\"Code log is at {code_log}\")", "def _setup_logging(self, config, channel):\r\n\r\n logfile = getattr(config, '%s_logfile' % channel)\r\n if not 
logfile:\r\n return\r\n\r\n maxbytes = getattr(config, '%s_logfile_maxbytes' % channel)\r\n backups = getattr(config, '%s_logfile_backups' % channel)\r\n fmt = '%(message)s'\r\n if logfile == 'syslog':\r\n warnings.warn(\"Specifying 'syslog' for filename is deprecated. \"\r\n \"Use %s_syslog instead.\" % channel, DeprecationWarning)\r\n fmt = ' '.join((config.name, fmt))\r\n self.mainlog = loggers.handle_file(\r\n config.options.getLogger(),\r\n filename=logfile,\r\n fmt=fmt,\r\n rotating=not not maxbytes, # optimization\r\n maxbytes=maxbytes,\r\n backups=backups)\r\n\r\n if getattr(config, '%s_syslog' % channel, False):\r\n fmt = config.name + ' %(message)s'\r\n loggers.handle_syslog(self.mainlog, fmt)", "def setup_logging_with_config(config: DynaBox):\n global logger\n logger = setup_logging_threatbus(config, logger_name)" ]
[ "0.7340265", "0.7313647", "0.7286824", "0.724973", "0.7248175", "0.7177587", "0.7139101", "0.70546126", "0.70544195", "0.7049995", "0.70458865", "0.70351976", "0.70171684", "0.70139897", "0.7012062", "0.7011974", "0.69839966", "0.69708276", "0.69193125", "0.68887985", "0.6874821", "0.686993", "0.6868416", "0.6829063", "0.6827787", "0.6824027", "0.6786854", "0.6775784", "0.6754536", "0.6749705" ]
0.8009328
0
Connect two nodes with a channel. Connects node a to node b using the given channel.
def connect(self, channel, a, b): a.sender.channels.append(channel) channel.receivers.append(b)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def connectChannel(sock, chan):\n sock.send(\"JOIN {}\\r\\n\".format(chan).encode(\"utf-8\"))\n\n console.info(\"Successfully connected to {}\".format(chan))", "def connect(self, node1, node2):\n self.neighbour1 = node1\n self.neighbour2 = node2", "def connect_two_nodes(left_node: Node, right_node: Node) -> None:\n global _graph\n\n if left_node is None or right_node is None:\n return\n\n if left_node['category'] != 'person' and right_node['category'] != 'person':\n # This is not a person to person link, link directly\n edge1 = LINKS_TO(left_node, right_node)\n edge2 = LINKS_TO(right_node, left_node)\n\n # Do a \"merge\" instead of a \"create\" to prevent double edges.\n _graph.merge(edge1 | edge2, 'RCGNode', '_key')\n return\n\n # At least one of the nodes is a 'person' link. These should be linked via their 'person-root' node.\n if left_node['category'] == 'person' and right_node['category'] != 'person':\n connect_person_and_non_person_node(person_node=left_node,\n non_person_node=right_node)\n return\n\n if left_node['category'] != 'person' and right_node['category'] == 'person':\n connect_person_and_non_person_node(person_node=right_node,\n non_person_node=left_node)\n return\n\n connect_person_and_person_node(left_node=left_node,\n right_node=right_node)\n return", "def connect_both(node1, node2, weight):\n connect_one_way(node1, node2, weight)\n connect_one_way(node2, node1, weight)", "def join_channel(self, channel):\r\n self._send('JOIN #%s\\r\\n' % channel)", "def connect_channel(channel, service=VoidService, config={}):\n return service._connect(channel, config)", "def connect(self, source, target):\r\n connection = (self.coalesce_node(source), self.coalesce_node(target))\r\n self.connections.add(connection)", "def login(self, channel, username, password):\n self.username = username\n self.channel = channel\n self.emit('initChannelCallbacks')\n self.emit('joinChannel', {'name': channel})\n self.emit('login', {'name': username, 'pw': password})", "def connect(self, from_vertex, to_vertex):\n self.graph[from_vertex].append(to_vertex)\n self.graph[to_vertex].append(from_vertex)", "async def join(self, channel : str):\n # todo: check if # is required. 
If it is, append it at the start if DNE.\n await self._connection.join(channel)", "def join(self, channel):\n self.channels[channel.name.lower()] = channel\n channel.protocol = self.protocol\n self.protocol.join(channel.name)", "def add_connection(\n self, port1: ryvencore.NodePort.NodeOutput, port2: ryvencore.NodePort.NodeInput\n ) -> ryvencore.Connection.DataConnection:\n ryven_connection = self.script.flow.connect_nodes(port1, port2)\n if not ryven_connection:\n return\n\n # Add connection in compas graph\n node1 = port1.node\n node2 = port2.node\n edge_key = (node1.GLOBAL_ID, node2.GLOBAL_ID)\n if not self.has_edge(*edge_key):\n self.add_edge(*edge_key, {\"connections\": []})\n connections = self.edge_attribute(edge_key, \"connections\")\n connections.append({\"port1\": self.get_port_info(port1), \"port2\": self.get_port_info(port2)})\n self.edge_attribute(edge_key, \"connections\", connections)\n\n return ryven_connection", "def join(self, source, channel):\n\n self.channel_map[channel].add(source[0])\n self.nick_map[source[0]].add(channel)\n\n self.log(\"*** {0:s} has joined {1:s}\".format(source[0], channel))", "def join(self, channel):\n raise NotImplementedError", "def join(self, channel, password=None):\n\n if not channel:\n return\n\n # do join with password\n if password:\n self._raw('JOIN %s %s' % (channel, password))\n try:\n self.channels[channel.lower()]['key'] = password\n self.channels.save()\n except KeyError:\n pass\n else:\n # do pure join\n self._raw('JOIN %s' % channel)", "def connect_channel(chid, timeout=None, verbose=False):\n if verbose:\n write(' connect channel -> %s %s %s ' %\n (repr(chid), repr(state(chid)), repr(dbr.CS_CONN)))\n conn = (state(chid) == dbr.CS_CONN)\n if not conn:\n # not connected yet, either indicating a slow network\n # or a truly un-connnectable channel.\n start_time = time.time()\n ctx = current_context()\n pvname = name(chid)\n global _cache\n if ctx not in _cache:\n _cache[ctx] = {}\n\n if timeout is None:\n timeout = DEFAULT_CONNECTION_TIMEOUT\n\n while (not conn and ((time.time()-start_time) < timeout)):\n poll()\n conn = (state(chid) == dbr.CS_CONN)\n if not conn:\n _cache[ctx][pvname]['ts'] = time.time()\n _cache[ctx][pvname]['failures'] += 1\n return conn", "def connect_one_way(node1, node2, weight):\n node1.add_or_update_neighbour(node2, weight)", "def __connectNode__(self, numOfNodes, linksPerIteration):\n numOfLinks = 0\n node1 = self.getNode(numOfNodes - 1)\n # add n links per iteration\n while numOfLinks < linksPerIteration:\n #choose second node randomly\n nodeid2 = 0\n r = random.random()\n while nodeid2 < numOfNodes-1:\n node2 = self.getNode(nodeid2)\n #determine probability to choose node\n if self.degreeSum != 0 and node2.degree() != 0:\n pi = float(node2.degree()) / (self.degreeSum - node1.degree())\n else:\n pi = float(1)\n if not node1.hasLinkTo(node2):\n #choose node with probability pi\n if r < pi:\n numOfLinks += 1\n self.degreeSum += 2\n node1.addLinkTo(node2)\n node2.addLinkTo(node1)\n break\n r -= pi\n nodeid2 += 1", "def join_channel(self, server, username, channel):\n for sock in self.socks:\n if sock.server == server and username == sock.username:\n if sock.channel == channel:\n return sock\n sock.send(\"JOIN {}\\r\\n\".format(channel))\n print (\"[!] 
channel {} joined on {} with username {}\".format(channel, server, username))\n sock = IRC.Socket(self.dispatcher, sock.sock, username, server, channel)\n self.replyer.add_sock(sock)\n return sock\n return self.add_sock(server=server, username=username, channel=channel)", "def linkTrackToChannel(*args, **kwargs):\n pass", "def connect(source, destinations, ignores=tuple()):\n sourceChannelBox = ChannelBox(source, *ignores)\n sourceChannelBox.connect(*destinations)", "async def connect(self, channel_id: int):\n payload = {\n 'op': 4,\n 'd': {\n 'guild_id': self.guild_id,\n 'channel_id': str(channel_id),\n 'self_mute': False,\n 'self_deaf': False\n }\n }\n await self._bot._connection._get_websocket(int(self.guild_id)).send(json.dumps(payload))", "def connect( s, o1, o2 ):\n\n try:\n if isinstance( o1, int ) or isinstance( o2, int ): # special case\n if isinstance( o1, int ):\n o1, o2 = o2, o1 # o1 is signal, o2 is int\n assert isinstance( o1, Connectable )\n\n const = Const( o1.Type, o2 )\n const._parent = s\n o1._connect( const )\n\n else: # normal\n assert isinstance( o1, Connectable ) and isinstance( o2, Connectable )\n try:\n assert o1.Type == o2.Type\n except AttributeError:\n pass\n o1._connect( o2 )\n\n except AssertionError as e:\n raise InvalidConnectionError( \"\\n{}\".format(e) )", "async def connect(\n self, ctx: commands.Context, *, channel: t.Optional[discord.VoiceChannel]\n ) -> None:\n # We prioritise user-specified channels if they are available, else we use the author's current channel.\n destination = (\n channel\n if channel is not None\n else getattr(ctx.author.voice, \"channel\", None)\n )\n\n if destination is not None:\n player = self.get_player(ctx.guild)\n await player.connect(destination.id)\n else:\n fail = Embeds.status(success=False, desc=None)\n items = ctx.message.content.split()[1:]\n\n # Check that the user is invoking the connect command with an invalid channel.\n if len(items) > 0 and ctx.command.qualified_name == \"connect\":\n fail.description = f\"`{' '.join(items)}` is not a valid voice channel.\"\n else:\n fail.description = \"You aren't connected to a voice channel!\"\n\n await ctx.send(embed=fail)", "def join_network(self):\n connect_nodes_bi(self.nodes, 1, 2)\n self.sync_all()", "def Connect(self, node1_idx, node2_idx, arrow=False, weight = 0, capacity = -1, flow = 0):\n if node1_idx == node2_idx or node1_idx > self.NodesCount() or node2_idx > self.NodesCount():\n return False\n\n for n in self.nodes:\n if n.index == node1_idx:\n a = n\n elif n.index == node2_idx:\n b = n\n\n if self.isNetwork and (a.index != b.index and ((a,b) not in self.connections and (b, a) not in self.connections)):\n self.edges.append(Edge(len(self.edges)+1, a, b, arrow, weight, capacity, flow, isNetwork=True))\n self.connections.append((a, b))\n a.neighbours.append(b.index)\n return True \n # prevent from adding already connected nodes\n elif ((a.index != b.index and (a, b) not in self.connections and (b, a) not in self.connections) or \n (a.index != b.index and ((a,b) not in self.connections or (b, a) not in self.connections) and arrow) \n and not self.isNetwork):\n \n self.edges.append(Edge(len(self.edges)+1, a, b, arrow, weight))\n self.connections.append((a, b))\n if arrow:\n a.neighbours.append(b.index)\n return True\n else:\n if b.index not in a.neighbours:\n a.neighbours.append(b.index)\n if a.index not in b.neighbours:\n b.neighbours.append(a.index)\n return True\n else:\n return False", "async def connect(self, channel=\"btc_confirmed_exchange_flows\"):\n uri = 
\"wss://ws.tokenanalyst.io\"\n id = \"token_analyst_stream\"\n payload = {\"event\":\"subscribe\",\"channel\":channel,\"id\":id,\"key\":self._key}\n\n async with websockets.connect(uri, ping_timeout=None) as websocket:\n self._ws = websocket\n await websocket.send(json.dumps(payload))\n async for msg in websocket: \n data = await self.interpret(json.loads(msg), id)\n yield data", "def link_channel_cell(ar_cell_label,ar_coorx,ar_coory,ar_lambda,ar_cell_down,ar_n_c,Xext_flow,Yext_flow):\n cell=find_cell_coordinates(ar_cell_label,Xext_flow,Yext_flow,ar_coorx,ar_coory,ar_lambda,channel=False)\n hillslope=True\n li_ind=[]\n cc=0.\n while hillslope:\n ind=np.where(ar_cell_label==cell)\n if ar_lambda[ind]==1.:\n hillslope=False\n last_ind=ind\n else:\n cc=cc+1\n print 'Cell',cell,'has been conected to the channel network via cell',ar_cell_down[ind]\n li_ind.append(ind)\n ar_lambda[ind]=1.\n cell=ar_cell_down[ind]\n for i in li_ind:\n ar_n_c[i]=ar_n_c[last_ind]\n if cc==0.:\n print 'External flows already connected'\n return ar_lambda,ar_n_c", "def lndconnect(node_index):\n lndconnect_node(Node.from_index(node_index))", "def connect(self, node):\n self._hostname = node['host']\n node_hash = self._node_hash(node)\n if node_hash in self.__existing_connections:\n self._ssh = self.__existing_connections[node_hash]\n else:\n start = time()\n self._ssh.connect(node['host'], username=node['username'],\n password=node['password'])\n self.__existing_connections[node_hash] = self._ssh\n logger.trace('connect took {} seconds'.format(time() - start))" ]
[ "0.60193217", "0.58925235", "0.5827985", "0.57909036", "0.57899636", "0.56818223", "0.56715274", "0.5666792", "0.56497663", "0.56090224", "0.5600908", "0.55008054", "0.54504013", "0.5447329", "0.54283625", "0.5424262", "0.54172355", "0.53959185", "0.5352588", "0.53389084", "0.52952665", "0.52810204", "0.52622086", "0.5234003", "0.52277297", "0.5216579", "0.52122074", "0.519895", "0.51897985", "0.51892203" ]
0.73055255
0
crawl object and return the result
def crawl_start(crawl_obj): res = None if crawl_obj.type in ['user', 'song'] : res = eval('crawl_' + crawl_obj.type)(crawl_obj) elif crawl_obj.type in ['artist', 'album'] : web_data = requests.get(crawl_obj.url, headers = cheat_headers) soup = bs4.BeautifulSoup(web_data.text, 'lxml') res = eval('crawl_' + crawl_obj.type)(crawl_obj.type, soup) else: print("Object type UNKNOWN!") return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _scrape(self):", "def scrape(self):\n pass", "def crawl(self, url):\n return None", "def scrape(self):\n try:\n self.result = urlfetch.fetch(self.url)\n except DownloadError:\n self.result = urlfetch.fetch(self.url) \n if ((self.result.status_code == 200) and\n (self.result.content_was_truncated == 0)):\n self.soup = BeautifulSoup(self.result.content)\n else:\n logging.critical(\"Bad Status Code: \", self.result.status_code, self.url)\n sys.exit(1)", "def parse(self, response):\n print('爬取链接',response.url)\n self.logger.info('爬取链接{}'.format(response.url))\n pattern=re.compile('q=([\\u4e00-\\u9fa5_a-zA-Z0-9]{0,})')\n target_url=unquote(response.url)\n keyword=re.findall(pattern,target_url)\n self.logger.info('组合{}'.format(keyword))\n print('组合{}'.format(keyword))\n js = json.loads(response.body.decode('utf-8'))\n print(js)\n\n if js.get('code')!=501:\n if js.get('totalCount') and js.get('totalCount') !=0:\n #proceed to next page\n total_count = js['totalCount']\n current_url_id = js['q']\n\n yield self.parse_detail(response,js)\n else:\n yield Request(url=response.url, callback=self.parse, dont_filter=True)", "def main():\n goods = '书包'\n # 爬取深度\n depth = 3\n start_url = 'https://s.taobao.com/search?q=' + goods\n # 输出结果的列表\n infoList = []\n # 使用for循环对每一个页面进行处理\n for i in range(depth):\n try:\n # 每个页面的URL链接\n url = start_url + '' + str(44*i)\n html = getHTMLText(url)\n parsePage(infoList, html)\n except:\n continue\n printGoodsList(infoList)", "def parse(self, url):\n pass", "def single_crawl(self, urlitem: str):\n # print(\"Item: \", urlitem)\n try:\n hdr = {\n \"User-Agent\": \"Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2228.0 Safari/537.36 \",\n \"Accept\": \"text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\",\n \"Accept-Charset\": \"ISO-8859-1,utf-8;q=0.7,*;q=0.3\",\n \"Accept-Encoding\": \"none\",\n \"Accept-Language\": \"en-US,en;q=0.8\",\n \"Connection\": \"keep-alive\",\n }\n try:\n req = Request(urlitem, headers=hdr)\n html_page = urlopen(req)\n soup = BeautifulSoup(html_page, \"lxml\")\n links = [\n requests.compat.urljoin(urlitem, link.get(\"href\"))\n for link in soup.findAll(\"a\")\n ]\n links = [x for x in links if \"#\" not in x]\n except Exception as e:\n # print(e)\n pass\n return links\n\n except:\n pass", "def crawl(spider: str, book_id: int):\n proc = CrawlerProcess(get_project_settings())\n\n proc.crawl(spider, book_id=book_id)\n\n proc.start()", "def crawl(self) -> None:\n result = self.__exec_request(self.url)\n if result == \"failed\":\n raise InterruptedError(\"The server responded with status code: {}\".format(self._status_code))\n self.__save_relevants_in_results(result, total=True)\n self.total_nums = self.results[\"total_results\"]\n pbar = tqdm(total=self.total_nums / 100) if self.to_be_num > self.total_nums else tqdm(total=self.to_be_num/100)\n pbar.update(1)\n if len(self.results[\"documents\"]) != self.to_be_num:\n while self.num_res < self.total_nums:\n # print(\"Is: {} | To be: {}\".format(self.num_res, self.total_nums))\n for el in result['search-results']['link']:\n if el['@ref'] == 'next':\n next_url = el['@href']\n result = self.__exec_request(next_url)\n if result == \"failed\":\n print(\"Invalid request. Server responded with Statuscode 400 while crawling. 
\"\n \"The found articles will be saved further on...\")\n break\n self.__save_relevants_in_results(result)\n pbar.update(1)\n if len(self.results[\"documents\"]) == self.to_be_num:\n break\n if len(self.results[\"documents\"]) == self.to_be_num:\n break\n pbar.close()", "def pywget_inside_crawler(url, depth, start_dir, start_file, root_dir_name):\n depth -= 1\n\n content = ''\n try:\n request = urllib.request.urlopen(url)\n content = request.read().decode(\"utf-8\")\n except:\n pass\n\n # all the information that's inside <a href> and <img src> tags\n match = re.findall(r'<a href=\"(.*?)\"', content) + \\\n re.findall(r'<a href = \"(.*?)\"', content) + \\\n re.findall(r'<img src=\"(.*?)\"', content) + \\\n re.findall(r'<img src = \"(.*?)\"', content)\n\n prefix = url[0 : url.rfind('/')] # a prefix of the link. useful to check if a link is under the same domain\n\n all_item_list = add_item_to_list(match, prefix) # add information to a list\n\n for item in all_item_list:\n pywget_recursive(item, depth, start_dir, start_file, root_dir_name) # recursively download the information", "def fetch(self):\n self.genre = \"Review\"\n try:\n if not self.__setSoup():\n log.info(self.log_msg(\"Soup not set,returning false\"))\n return False\n #if not self._getParentPage():\n # log.info(self.log_msg(\"Parent page not found\"))\n while True:\n parent_page_soup = copy.copy(self.soup)\n # log.info(self.log_msg('current uri%s'%parent_page_soup))\n if not self.__addReviews():\n log.info(self.log_msg('fetched all reviews for the url %s'\\\n %self.task.instance_data['uri']))\n \n log.info(self.log_msg('Next page%s'%self.currenturi))\n try:\n \n # self.currenturi = self.task.instance_data['uri'].rsplit\\\n # ('/', 1)[0] + '/' + self.soup.find('a', \\\n # title='Go to the next page')['href']\n self.currenturi = 'http://www.phonedog.com' + parent_page_soup.find('a',title='Go to the next page')['href']\n \n if not self.__setSoup():\n log.info(self.log_msg('soup not set for the uri %s'%\\\n self.currenturi))\n break\n except:\n log.info(self.log_msg('Next page not found for the uri %s'%\\\n self.currenturi))\n break\n return True\n except:\n log.exception(self.log_msg(\"Exception in fetch\"))\n return False", "def parse(self, response):\n for cate_selector in response.css(\"div.sub-nav-cont ul li a\"):\n link = cate_selector.css(\"::attr(href)\").extract_first()\n if response.urljoin(link) in self.start_urls:\n continue\n cate_name = cate_selector.css(\"::attr(c-bname)\").extract_first()\n meta = {\"cate_name\": cate_name, \"page_num\": 1,\n \"base_url\": link.rsplit(\".\", 1)[0]}\n yield response.follow(link, callback=self.parse_category, meta=meta)", "def get_results():\n # store info in a dictionary {name -> shortname}\n res = {}\n session = requests.Session()\n handle_url('http://www.gocomics.com/features', session, res)\n handle_url('http://www.gocomics.com/explore/editorial_list', session, res)\n handle_url('http://www.gocomics.com/explore/sherpa_list', session, res)\n save_result(res, json_file)", "def main():\n proxy = get_random_proxy()\n html = crawl(target_url)\n company_all_url = html.xpath('//*[@id=\"quotesearch\"]/ul/li/a/@href')\n code=['none']*len(company_all_url)\n for i in range(len(company_all_url)):\n s = str(str(company_all_url[i]))\n code[i]=s[(len(s) - 13):(len(s) - 5)]\n save_to_neo4j(code,0,len(code))", "def parse(self, response):\n\n #下面这种写法使用生成器方式比较好\n \"\"\" items = []\n for i in response.css('div.quote'):\n item = ScrapequoteItem()\n item['tag'] = 
i.css('span.text[itemprop]::text').get()\n item['author'] = i.css('small.author::text').get()\n items.append(item)\n return items \"\"\"\n\n for i in response.css('div.quote'):\n item = ScrapequoteItem()\n item['tag'] = i.css('span.text[itemprop]::text').get()\n item['author'] = i.css('small.author::text').get()\n yield item\n\n #以下循环获取其他页面\n next_page = response.css('li.next a::attr(href)').get()\n if next_page is not None:\n yield response.follow(next_page, callback=self.parse) #返回一个Request instance", "def run(self):\n\n # The url is too deep, skip the url.. Work is done!\n if self.depth_ > self.depth:\n return\n\n # Get doc id corresponds to the url. Add a new entry into doc index if there is no entry.\n doc_id = self.crawler.document_id(self.curr_url)\n\n # Check if the doc_id has been visited/processed by any of crawler_threads. Add doc_id to seen if not so.\n if self.crawler.checkDocVisitedAndUpdate(doc_id):\n return\n\n # Process the document corresponds to the url\n socket = None\n try:\n socket = urllib2.urlopen(self.curr_url, timeout=self.timeout)\n soup = BeautifulSoup(socket.read())\n self._curr_depth = self.depth_ + 1\n self._curr_doc_id = doc_id\n # Traverse the document as deep as possible and add those newly discovered urls into url queue\n self._index_document(soup)\n # Store (wordId, docId) and (word, url) into inverted_index and resolved_inverted_index respectively.\n self.crawler._add_words_to_document(self._curr_words, self._curr_doc_id)\n except:\n pass\n finally:\n if socket:\n socket.close()", "def get_results():\n # store info in a dictionary {name -> shortname}\n res = {}\n session = requests.Session()\n handle_url('http://www.creators.com/comics/cat-seeall.html', session, res)\n save_result(res, json_file)", "def __init__(self, tree, result, url):\n self.tree = tree\n self.result = result\n self.url = url", "def crawl(url):\n while True:\n try:\n proxy=get_random_proxy()\n proxies = {'http': 'http://' + proxy}\n logger.info(proxies)\n resp = requests.get(url, proxies=proxies,timeout=3) # 设置代理,抓取每个公司的连接\n resp.encoding = resp.apparent_encoding # 可以正确解码\n if resp.status_code==200:\n html = etree.HTML(resp.text)\n logger.info(\"成功获得公司信息url!!!\")\n break\n else:\n continue\n except:\n logger.info(\"没获取到\")\n continue\n return html", "def processSearchResult(self):", "def scrap_site(link):\n pass # Scrapy or BeautifulSoup", "def crawl_website(url):\n page = get_page(url)\n source, created = Source.objects.get_or_create(url=url)\n source.last_sync = datetime.now()\n source.save()\n properties = get_properties(page)\n title = properties[0][:49].replace(\"\\n\", \" \")\n content = properties[1].replace(\"\\n\", \" \").encode('unicode_escape')\n summary = properties[2]['text'][:500].replace(\"\\n\", \" \").encode('unicode_escape')\n content = Content(source=source,\n title=title,\n summary=summary,\n content=content)\n content.save()\n Media(type=\"image\", content=content, url=properties[3]).save()\n\n return properties", "def get(self, scrap):\n return scrap", "def run(self):\n\n try:\n # Get the content from this page\n if self.verbose:\n print \"Getting page content for '%s'\" % self.url.strip()\n \n content = getPageContent(self.url)\n\n # Verify that this is not binary data\n if content is not None and isHTML(content):\n\n\n # Extract basic data about this result\n content = content.lower()\n title, keywords, description = parseMetaDataFromContent(content)\n headers = parseHeaderInformationFromContent(content)\n\n # Add this result data\n 
self.resultDictionary['title'] = title\n self.resultDictionary['keywords'] = keywords\n self.resultDictionary['description'] = description\n self.resultDictionary['content'] = content\n self.resultDictionary['headers'] = headers\n\n # Run the extensions\n for extension in self.extensions:\n extension.run(self.resultDictionary)\n\n\n except URLError:\n\n # Skip this URL, and register it as an error on the cache\n if self.verbose:\n print(\"Error accessing '%s', %s\" % (self.url.strip(), str(sys.exc_info()[1]).strip()))", "def __init__(self, lookup_result, scraper):\n self.scraper = scraper\n self.title = \"\"\n self.id = None\n self.links = []\n\n self.title = get_child_data(lookup_result, \"title\", \"\")\n self.id = get_child_data(lookup_result, \"id\", None)\n\n link = first_child(lookup_result, \"url\")\n while link:\n self.links.append(ScrapeURL(link, cache = scraper.cache))\n link = next_sibling(link, \"url\")\n return", "def crawl(self):\n try:\n self.crawl_pages()\n self.crawl_posts()\n self.crawl_comments()\n except Exception as exception:\n self.handle_request_limit(exception)", "def crawl(self, url, crawl_timeout, target_url, max_depth):\n try:\n\n max_depth = int(max_depth)\n if not max_depth >= 1:\n return []\n\n url_list = []\n httpClient = HttpClient()\n info = httpClient.send(url, float(crawl_timeout))\n #info = open(\"./res\").read()\n htmlUrl = HtmlUrl(url, target_url)\n htmlUrl.feed(info)\n imgUrl = ImgUrl(url, target_url)\n imgUrl.feed(info)\n htmlUrl.urls.extend(imgUrl.urls)\n map((lambda item: url_list.append(\n item.decode('utf-8').encode('utf8'))), htmlUrl.urls)\n url_list = list(set(url_list))\n\n url_final_list = copy.deepcopy(url_list)\n for url in url_list:\n url_final_list.extend(\n self.crawl(\n url,\n crawl_timeout,\n target_url,\n max_depth -\n 1))\n\n self.logger.info(\n \"crawl url %s,current max_depth is %s\" %\n (url, max_depth))\n return url_final_list\n except Exception as e:\n self.logger.info(\"crawl url %s error,reason %s\" % (url, e))\n return []", "def fetch(self):\r\n self.genre=\"Review\"\r\n try:\r\n self.parent_uri = self.currenturi\r\n self.total_posts_count = 0\r\n self.last_timestamp = datetime( 1980,1,1 )\r\n self.max_posts_count = int(tg.config.get(path='Connector',key='silverlight_numresults'))\r\n self.hrefs_info = self.currenturi.split('/')\r\n if '/forums/t/' in self.currenturi:\r\n if not self.__setSoup():\r\n log.info(self.log_msg('Soup not set , Returning False from Fetch'))\r\n return False\r\n self.__getParentPage()\r\n self.__addPosts()\r\n return True\r\n else:\r\n if not self.__setSoup():\r\n log.info(self.log_msg('Soup not set , Returning False from Fetch'))\r\n return False\r\n while True:\r\n if not self.__getThreadPage():\r\n break\r\n try:\r\n self.currenturi = self.soup.find('a',text='Next >').parent['href']\r\n if not self.__setSoup():\r\n break\r\n except:\r\n log.info(self.log_msg('Next Page link not found'))\r\n break\r\n if self.linksOut:\r\n updateSessionInfo('Search', self.session_info_out,self.last_timestamp , None,'ForumThreadsPage', self.task.instance_data.get('update'))\r\n\r\n return True\r\n except:\r\n log.exception(self.log_msg('Exception in fetch'))\r\n return False", "def crawl(url):\n try:\n # kondisi berhenti\n time_now = time.time() - start_time\n time_now_int = int(time_now)\n if time_now_int >= 900:\n return\n\n # memasukan url kedalam visited_url\n visited_url.append(url)\n\n # crawl page\n print(\"page yang sedang di crawl:\", url)\n page = requests.get(url)\n request = page.content\n soup = 
bs4.BeautifulSoup(request, 'html.parser')\n\n # extract title\n title = soup.title.string\n\n # check version html\n article_html5 = soup.find('article')\n if article_html5 is None:\n # extract text content from html4\n html5 = \"no\"\n texts = soup.find('body').findAll(text=True)\n visible_texts = filter(tag_visible, texts)\n text = u\" \".join(t.strip() for t in visible_texts)\n text = text.lstrip().rstrip()\n text = text.split(',')\n clean_text = ''\n for sen in text:\n if sen:\n sen = sen.rstrip().lstrip()\n clean_text += sen+','\n complete_text = clean_text\n # print(complete_text)\n else:\n # extract text content from html5\n html5 = \"yes\"\n texts = article_html5.findAll(text=True)\n visible_texts = filter(tag_visible, texts)\n text = u\" \".join(t.strip() for t in visible_texts)\n text = text.lstrip().rstrip()\n text = text.split(',')\n clean_text = ''\n for sen in text:\n if sen:\n sen = sen.rstrip().lstrip()\n clean_text += sen+','\n complete_text = clean_text\n # print(complete_text)\n\n # get meta description\n description = soup.find(\"meta\",attrs={\"name\":\"description\"})\n if description is None:\n description = \"-\"\n else:\n description = description.get(\"content\")\n\n # get meta keywords\n keywords = soup.find(\"meta\",attrs={\"name\":\"keywords\"})\n if keywords is None:\n keywords = \"-\"\n else:\n keywords = keywords.get(\"content\")\n\n # isHotURL\n hot_link = \"no\"\n\n # check table if exist at crawldb\n cursor.execute(\n \"SELECT base_url, COUNT(*) FROM page_information WHERE base_url = %s GROUP BY base_url\",\n (url,)\n )\n results = cursor.fetchall()\n # gets the number of rows affected by the command executed\n row_count = cursor.rowcount\n if row_count == 0:\n # Create a new record\n sql = \"INSERT INTO `page_information` (`base_url`, `html5`, `title`, `description`, `keywords`, `content_text`, `hot_url`, `model_crawl`) VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\"\n # Execute the query\n cursor.execute(sql, (url, html5, title, description, keywords, complete_text, hot_link, \"BFS crawling\"))\n # commit to save our changes\n db.commit()\n else:\n # update database\n sql = \"UPDATE page_information SET hot_url = %s WHERE base_url = %s\"\n # Execute the query\n cursor.execute(sql, (hot_url, url))\n # commit to save our changes\n db.commit()\n\n # extract style\n for style in soup.findAll('style'):\n # Create a new record\n sql = \"INSERT INTO `style_resource` (`base_url`, `style`) VALUES (%s, %s)\"\n # Execute the query\n cursor.execute(sql, (url, style))\n # commit to save our changes\n db.commit()\n\n # extract script\n for script in soup.findAll('script'):\n # Create a new record\n sql = \"INSERT INTO `script_resource` (`base_url`, `script`) VALUES (%s, %s)\"\n # Execute the query\n cursor.execute(sql, (url, script))\n # commit to save our changes\n db.commit()\n\n # extract lists\n for lists in soup.findAll('li'):\n # Create a new record\n sql = \"INSERT INTO `list` (`base_url`, `list`) VALUES (%s, %s)\"\n # Execute the query\n cursor.execute(sql, (url, lists))\n # commit to save our changes\n db.commit()\n\n # extract forms\n for form in soup.findAll('form'):\n # Create a new record\n sql = \"INSERT INTO `forms` (`base_url`, `form`) VALUES (%s, %s)\"\n # Execute the query\n cursor.execute(sql, (url, form))\n # commit to save our changes\n db.commit()\n\n # extract tables\n for table in soup.findAll('table'):\n # Create a new record\n sql = \"INSERT INTO `tables` (`base_url`, `tables`) VALUES (%s, %s)\"\n # Execute the query\n cursor.execute(sql, (url, 
table))\n # commit to save our changes\n db.commit()\n\n # extract images\n for image in soup.findAll('img'):\n # Create a new record\n sql = \"INSERT INTO `images` (`base_url`, `image`) VALUES (%s, %s)\"\n # Execute the query\n cursor.execute(sql, (url, image))\n # commit to save our changes\n db.commit()\n\n # extract outgoing link\n links = soup.findAll(\"a\", href=True)\n\n # memasukan outgoing link kedalam queue\n for i in links:\n flag = 0\n\n # Complete relative URLs and strip trailing slash\n complete_url = urljoin(url, i[\"href\"]).rstrip('/')\n\n # create graph\n # G.add_edges_from([(url, complete_url)])\n\n # create list graph\n branch = []\n # remove https://\n new_url = url.replace('https://', '')\n new_url = new_url.replace('http://', '')\n new_complete = complete_url.replace('https://', '')\n new_complete = new_complete.replace('http://', '')\n branch.append(new_url)\n branch.append(new_complete)\n list_g.append(branch)\n\n # Create a new record\n sql = \"INSERT INTO `linking` (`crawl_id`, `url`, `outgoing_link`) VALUES (%s, %s, %s)\"\n # Execute the query\n cursor.execute(sql, (1, url, complete_url))\n # commit to save our changes\n db.commit()\n\n # Check if the URL already exists in the url_queue\n for j in url_queue:\n if j == complete_url:\n flag = 1\n break\n\n # Check if the URL already exists in the visited_url\n for j in visited_url:\n if (j == complete_url):\n flag = 1\n break\n\n # If not found in queue\n if flag == 0:\n if (visited_url.count(complete_url)) == 0:\n url_queue.append(complete_url)\n\n except (AttributeError, KeyError, requests.exceptions.InvalidSchema, requests.exceptions.ConnectionError):\n title = \"no-title\"\n complete_text = \"no-text\"\n\n # crawl url selanjutnya\n if len(url_queue) == 0:\n return\n current = url_queue.popleft()\n\n # # create list graph\n # branch = []\n # # remove https://\n # new_url = url.replace('https://', '')\n # new_complete = current.replace('https://', '')\n # branch.append(new_url)\n # branch.append(new_complete)\n # list_g.append(branch)\n\n crawl(current)" ]
[ "0.71161443", "0.66178185", "0.6551923", "0.60435236", "0.6042226", "0.6040767", "0.59387845", "0.5858021", "0.5807145", "0.58034575", "0.5781588", "0.5735748", "0.5689979", "0.568243", "0.56674963", "0.5655977", "0.56543124", "0.5643372", "0.56385326", "0.5625415", "0.56237787", "0.5611752", "0.5600711", "0.55862534", "0.5581333", "0.55676764", "0.55581707", "0.5525734", "0.5521294", "0.55053645" ]
0.6746438
1
iterates over points on the board
def points_generator(self): rows, cols = self.game.board.board_size points = [Point(i, j) for i, j in product(range(rows), range(cols))] for point in points: yield point
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def iter_points(self):\n for x in range(self.left, self.right + 1):\n for y in range(self.top, self.bottom + 1):\n yield Point(x, y)", "def grid_points(self):\n for i in range(self.rows):\n for j in range(self.cols):\n min_lat,max_lat,min_lon,max_lon = self.coords_to_min_max_lat_lon((i,j))\n if i == 0:\n print_gps(max_lat,max_lon,\"grid\")\n if j == 0:\n print_gps(max_lat,min_lon,\"grid\")\n if j == 0:\n print_gps(min_lat,min_lon,\"grid\")\n print_gps(min_lat,max_lon,\"grid\")", "def __iter__(self):\n for point in self.points:\n yield point", "def __iter__(self):\n pt = (self.x, self.y)\n for i in pt:\n yield i", "def __iter__(self):\n return self.points.__iter__()", "def iterate_adjacent(position):\n return iter(_board_graph[position])", "def iter_grid_tiles(self):\n all_points = self.grid[0].union(self.grid[1], self.grid[2], {self.position})\n min_x = min(p.x for p in all_points)\n min_y = min(p.y for p in all_points)\n\n if min_x < 0:\n xoffset = -min_x\n elif min_x == 0:\n xoffset = 0\n elif min_x > 0:\n xoffset = min_x\n if min_y < 0:\n yoffset = -min_y\n elif min_y == 0:\n yoffset = 0\n elif min_y > 0:\n yoffset = min_y\n origin = Point(0 + xoffset, 0 + yoffset)\n position = Point(self.position.x + xoffset, self.position.y + yoffset)\n for tile_type in (0, 1, 2):\n for point in self.grid[tile_type]:\n newpoint = Point(point.x + xoffset, point.y + yoffset)\n if newpoint not in (origin, position):\n yield newpoint.x, newpoint.y, tile_type\n yield origin.x, origin.y , 4\n yield position.x, position.y, 3", "def iter_coords():\n yield (0, 0)\n incr = 0\n x = 1\n y = 0\n\n while True:\n incr += 2\n\n top = y + incr - 1\n bot = y - 1\n left = x - incr\n right = x\n\n yield (x, y)\n while y < top:\n y += 1\n yield (x, y)\n\n while x > left:\n x -= 1\n yield (x, y)\n\n while y > bot:\n y -= 1\n yield (x, y)\n\n while x < right:\n x += 1\n yield (x, y)\n\n x += 1", "def __iter__(self):\n for coord in self.position:\n yield coord", "def __iter__(self):\n for idx in range(0, self.Npoints):\n position = self.start + (self.end-self.start)/self.Npoints*idx\n yield position\n raise StopIteration()", "def iter_coordinates(self):\n for coord in self.position:\n yield coord", "def play_round_Conway_Cell(self):\n for x in self.board:\n for f in x:\n f.live_neighbors = 0\n\n for i in range(1, self.cols - 1):\n for j in range(1, self.rows - 1):\n status = self.board[i][j].status\n assert type(status)==int \n\n for m in range(i - 1, i + 2):\n for n in range(j - 1, j + 2):\n self.board[m][n].live_neighbors += status\n self.board[i][j].live_neighbors -= status", "def iter_neighbors(x: int, y: int) -> t.Generator[COORDINATE, None, None]:\n yield x - 1, y\n yield x + 1, y\n yield x, y - 1\n yield x, y + 1", "def display(self):\n for i in range(self.height - 1, 0, -1):\n for j in range(self.width):\n # yield i, j - 1, self.grid[i][j - 1]\n yield j, i, self.dungeon.tile(Point(j, i))\n\n \"\"\"\n def __iter__(self):\n for i in range(self.height):\n for j in range(self.width):\n yield Point(x=self.x + j, y=self.y + i)\n \"\"\"", "def piecesGenerator(self,player):\n for row in range(8):\n for col in range(8):\n if self.board[row][col] != None:\n piece,pos = self.pieceAt((row,col)) ,((row,col))\n if piece['player'] == player:\n yield piece,pos", "def place_piece(piece, px, py, pc):\n \n\n for i, j in piece:\n x = px + i\n y = py + j\n if not (0 <= x < BOARD_WIDTH):\n continue\n if not (0 <= y < BOARD_HEIGHT):\n continue\n board[y][x] = pc", "def __iter__(self):\n while (self.pointsleft > 0):\n current = 
min(self.pointsleft, self.settings.LOCALSKIPNUM)\n for i in range(current):\n self.add(self.fabric.getcoordinate())\n self.pointsleft -= self.settings.LOCALSKIPNUM\n self.pointscontroller.set(self.graph)\n yield self.graph", "def update(self, board):\n for row in range(8):\n for col in range(8):\n if board[row, col] == -1:\n self.circles[row][col].undraw()\n self.circles[row][col].draw(self.win)\n self.circles[row][col].setFill(self.piece_colors[0])\n elif board[row, col] == -2:\n self.circles[row][col].undraw()\n self.circles[row][col].draw(self.win)\n self.circles[row][col].setFill(self.piece_colors[2])\n elif board[row, col] == 0:\n self.circles[row][col].undraw()\n self.pieces[row][col].setFill(self.frame_colors[(row+col)%2])\n elif board[row, col] == 1:\n self.circles[row][col].undraw()\n self.circles[row][col].draw(self.win)\n self.circles[row][col].setFill(self.piece_colors[1])\n elif board[row, col] == 2:\n self.circles[row][col].undraw()\n self.circles[row][col].draw(self.win)\n self.circles[row][col].setFill(self.piece_colors[3])", "def _check_neighbors(self):\n for direction, dir_info in self.DIRECTIONS.items():\n pos = Point(\n self.position.x + dir_info[\"mask\"][0],\n self.position.y + dir_info[\"mask\"][1]\n )\n status = self.move(direction)\n self.grid[status].add(pos)\n if status in (1, 2):\n # moved\n self.move(dir_info[\"opposite\"])\n yield pos", "def __iterate(self):\n\t\tnext_board = []\n\n\t\tfor y, row in enumerate(self.__board):\n\t\t\tnext_board.append([])\n\n\t\t\tfor x, cell in enumerate(row):\n\t\t\t\tneighbors = [\n\t\t\t\t\tself.__get_cell_state(y - 1, x - 1),\n\t\t\t\t\tself.__get_cell_state(y - 1, x),\n\t\t\t\t\tself.__get_cell_state(y - 1, x + 1),\n\t\t\t\t\tself.__get_cell_state(y, x - 1),\n\t\t\t\t\tself.__get_cell_state(y, x + 1),\n\t\t\t\t\tself.__get_cell_state(y + 1, x - 1),\n\t\t\t\t\tself.__get_cell_state(y + 1, x),\n\t\t\t\t\tself.__get_cell_state(y + 1, x + 1)\n\t\t\t\t]\n\t\t\t\tnum_neighbors = sum(neighbors)\n\t\t\t\tstate = get_new_state(cell, num_neighbors)\n\t\t\t\tnext_board[y].append(state)\n\n\t\tself.__board = next_board\n\t\tself.__display(self.__board)", "def enumerate_points(self):\n\t\traise Exception(NotImplemented)", "def evaluate(self, board):", "def __iter__(self):\n width = self.GetWidth()\n height = self.GetHeight()\n pixels = self.GetPixels()\n \n\n\n\n class PixelFacade(object):\n def Get(self):\n return pixels.Get()\n def Set(self, *args, **kw):\n return pixels.Set(*args, **kw)\n def __str__(self):\n return str(self.Get())\n def __repr__(self):\n return 'pixel(%d,%d): %s' % (x,y,self.Get())\n X = property(lambda self: x)\n Y = property(lambda self: y)\n \n pf = PixelFacade() \n for y in xrange(height):\n pixels.MoveTo(self, 0, y)\n for x in xrange(width):\n\n\n\n yield pf \n pixels.nextPixel()", "def __iter__(self):\n width = self.GetWidth()\n height = self.GetHeight()\n pixels = self.GetPixels()\n \n\n\n\n class PixelFacade(object):\n def Get(self):\n return pixels.Get()\n def Set(self, *args, **kw):\n return pixels.Set(*args, **kw)\n def __str__(self):\n return str(self.Get())\n def __repr__(self):\n return 'pixel(%d,%d): %s' % (x,y,self.Get())\n X = property(lambda self: x)\n Y = property(lambda self: y)\n \n pf = PixelFacade() \n for y in xrange(height):\n pixels.MoveTo(self, 0, y)\n for x in xrange(width):\n\n\n\n yield pf \n pixels.nextPixel()", "def play_round_Fredkin_Cell(self):\n for x in self.board:\n for f in x:\n f.live_neighbors = 0\n\n for i in range(1, self.cols - 1):\n for j in range(1, self.rows - 1):\n 
status = self.board[i][j].status\n assert type(status)==int \n for m in range(i-1 , i +2):\n self.board[m][j].live_neighbors += status\n for n in range(j-1 , j +2):\n self.board[i][n].live_neighbors += status\n\n self.board[i][j].live_neighbors -= status", "def drawPoints(self, points, color):\n for p in points:\n Point\n p.color = color\n p.radius = self.points_radius\n p.conversion = False\n p.show(self.context)", "def all_cells(self):\n \"\"\"\n Note that we use the convention that the first cell is (1,1)\n \"\"\"\n spart_star = self.circle_star()\n part = Partition(list(spart_star))\n coordinates = part.cells()\n coordinates = [(x+1, y+1) for x, y in coordinates]\n return coordinates", "def check_keypoints(keypoints: Sequence[Sequence], rows: int, cols: int) -> None:\n for kp in keypoints:\n check_keypoint(kp, rows, cols)", "def coordinates(self):", "def get_neighbors(point):\n pt = point.copy()\n output= [point.copy() for i in range(4)]\n output[0:2] = map(Point.setY, output[0:2], [pt.getY()+ i for i in range(-1,2,2)])\n output[2:4]= map(Point.setX, output[2:4], [pt.getX()+ i for i in range(-1,2,2)])\n return output" ]
[ "0.72778916", "0.68592465", "0.6857867", "0.6820964", "0.663074", "0.6541899", "0.64622074", "0.6450496", "0.6430873", "0.6379434", "0.6351412", "0.6338828", "0.62530154", "0.6235255", "0.622739", "0.6218105", "0.6214263", "0.6176402", "0.61315227", "0.61090213", "0.60718495", "0.6050939", "0.6050539", "0.6050539", "0.60499495", "0.60100085", "0.60036093", "0.60014665", "0.5996907", "0.5986623" ]
0.6902485
1
Returns the minimum value of a and b ignoring any negative values.
def _get_min_positive_value(self, a, b): if a < 0 and b >= 0: return b if a >= 0 and b < 0: return a return min(a, b)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mini(a, b):\n return min(a, b)", "def _null_min(a, b):\n if a is None:\n return b\n if b is None:\n return a\n return min(a, b)", "def minimum_inplace(a, b):", "def minimum(lhs, rhs):\n return _make.minimum(lhs, rhs)", "def smallest_diff(a, b):\n b.sort()\n smallest_diff = None\n\n for n in a:\n idx = bisect_left(b, n)\n diff = min(abs(b[idx - 1] - n), abs(b[idx] - n))\n if smallest_diff is None or smallest_diff > diff:\n smallest_diff = diff\n\n return smallest_diff", "def mini(a,b):\n\tif a < b: \n\t\treturn a\n\treturn b", "def smart_min(v1, v2):\n\n if v1 is None:\n return v2\n\n if v2 is None:\n return v1\n\n return min(v1, v2)", "def smallest(*args):\r\n if len(args) == 2:\r\n a, b = args\r\n return switch(a < b, a, b)\r\n else:\r\n return min(stack(*args), axis=0)", "def minDist(l, a, b):\n pre = 0\n rt = float('INF')\n for i in range(len(l)):\n if l[i] == a or l[i] == b:\n pre = i\n break\n\n for i in range(pre+1, len(l)):\n if l[i] == a or l[i] == b:\n if l[i] != l[pre] and i - pre < rt:\n rt = i - pre\n pre = i\n return rt", "def mod_min(a, b):\n # like divmod_min, just skipping a single add\n r = (a % b)\n diff = b - r\n if abs(r) > abs(diff):\n r = -diff\n return r", "def to_zero(a, b):\n if abs(a) < abs(b):\n return a\n return b", "def least_difference(a, b, c):\n\n diff1 = abs(a - b)\n diff2 = abs(b - c)\n diff3 = abs(a - c)\n return min(diff1, diff2, diff3)", "def scalar_min(self, dst, src0, src1):\n return self._scalar_binary_func('min', dst, src0, src1)", "def divmod_min(a, b):\n q, r = divmod(a, b)\n \n # we will want to adjust r if\n # (|r| > |b/2|), which is equivalent to checking\n # (|2r| > |b|),\n # (|r| > |b| - |r|)\n # then using the fact that for python,\n # divmod will give |r| < |b| and r,b will have the same sign\n # (|r| > |b - r|)\n diff = b - r\n if abs(r) > abs(diff):\n q = q + 1 \n r = -diff\n return q,r", "def l2Min(A, b):\n #set up the matrices\n solvers.options['show_progress'] = False\n m,n = A.shape\n Q = matrix(2*np.eye(n))\n r = matrix(np.zeros(n))\n A = matrix(A.astype(float))\n b = matrix(b.astype(float))\n #solve the matrices\n sol=solvers.qp(Q, r,A=A,b=b)\n return np.ravel(sol['x']), sol['primal objective']", "def find_closest(a, b):\n a = np.atleast_1d(np.array(a))\n b = np.atleast_1d(np.array(b))\n out = [np.argmin(abs(b - a1)) for a1 in a]\n return out", "def minimum(self, start, end):\n return self.foldl1(start, end, min)", "def minimum(x,y,z):\r\n\treturn min(min(x,y),z)", "def d_min(x, y):\n axis = np.argmax(x.shape)\n return np.min(np.array([x, y]), axis=axis)", "def min(self, other):\n ox = self._op_check(other)\n r = self.dec_value.min(ox)\n if r == self.dec_value:\n return self\n else:\n return other", "def least_difference(self):\n diff1 = abs(self.a - self.b)\n diff2 = abs(self.b - self.c)\n diff3 = abs(self.a - self.c)\n return min(diff1, diff2, diff3)", "def min_or_none(val1, val2):\n return min(val1, val2, key=lambda x: sys.maxint if x is None else x)", "def minimum(x, y):\r\n # see decorator for function body\r", "def compare_min(values, weights):\n return np.min(values.numpy())", "def min(x):\n pass", "def min(self, other):\n ox, ctx = self._op_check(other)\n r = self.dec.min(ox)\n if r == self.dec:\n return self\n else:\n return other", "def a_plus_abs_b(a, b):\n if b < 0:\n op = sub(a,b)\n else:\n op = add(a,b)\n return op", "def l1Min(A, b):\n #set up the matrices\n m,n = A.shape\n solvers.options['show_progress'] = False\n c = np.concatenate([np.ones(n),np.zeros(n)]).astype(float)\n G = 
np.vstack((np.hstack((-np.eye(n),np.eye(n))), np.hstack((-np.eye(n),-np.eye(n))),np.hstack((-np.eye(n),np.zeros((n,n))))))\n h = np.zeros(3*n).astype(float)\n A = np.hstack((np.zeros((m,n)),A)).astype(float)\n #convert the matrices\n c = matrix(c)\n G = matrix(G)\n h = matrix(h)\n A = matrix(A)\n b = matrix(b.astype(float))\n #solve the matrices\n sol = solvers.lp(c, G, h,A,b)\n\n return np.ravel(sol['x'][n:]),sol['primal objective']", "def my_sum(a,b, min_value= None, max_value=None):\n c = abs(a) + abs(b)\n if min_value is None: min_value = np.min(c)\n if max_value is None: max_value = np.max(c)\n return np.clip(c, float(min_value), float(max_value))", "def my_func(a, b, c):\r\n return (a + b + c) - min(a, b, c)" ]
[ "0.79426205", "0.7518428", "0.7121123", "0.702984", "0.6997932", "0.69437057", "0.68694836", "0.68295264", "0.6827028", "0.6682797", "0.66441596", "0.6604545", "0.6468406", "0.64508706", "0.64410925", "0.64327365", "0.63909256", "0.6367581", "0.6353903", "0.63516575", "0.63503456", "0.6325124", "0.62710184", "0.6263289", "0.62486655", "0.62416506", "0.61507225", "0.61226577", "0.6098779", "0.6098528" ]
0.8785171
0
Retrieves all flashcards in ascending order (max 250 at a time) or, using basic pagination, returns `qty` flashcards occurring after `start`.
def retrieve_all_flashcards(start: int=0, qty:int=None): qty = 250 if qty == None else qty with sqlite3.connect(current_app.config['DB']) as db: c = db.cursor() c.execute(""" SELECT id, title, description, source, image_url, tags FROM flashcards WHERE id >= ? ORDER BY id ASC LIMIT ? """, (start, qty) ) raw_cards = c.fetchall() cards = [] for card in raw_cards: cards.append( Flashcard( id=card[0], title=card[1], description=card[2], source=card[3], image_url=card[4], tags=json.loads(card[5]) ) ) return cards
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cards(query_param):\n return _query_scryfall(query_param)", "def get_all(self, start_at, limit, order=None):", "def get_cards(shop, redas=None, skip=0, limit=40):\n connection = pymongo.MongoClient(MONGO_URL)\n db = connection[DB]\n\n selector = {'shops.' + shop: {'$exists': 1}}\n if redas:\n selector['redaction'] = {'$in': redas}\n\n sort = [['shops.' + shop + '.overpay', pymongo.DESCENDING]]\n\n return [tocard(card_dict) for card_dict in db.cards.find(selector).sort(sort).skip(skip).limit(limit)]", "def refresh_pages(self):\n\n # Calculate all cards to show.\n id_card_groups = {klass: [] for klass in self.KlassOrder}\n for k, v in all_cards().items():\n if v.data['derivative']:\n continue\n if any(not filter_fn(v.data['cost']) for filter_fn in self.cost_filter_fns):\n continue\n if not self._match_search_text(v):\n continue\n id_card_groups[v.data['klass']].append((k, v))\n\n card_id_groups = {\n klass: [k for k, v in sorted(id_card_group, key=self._card_order)]\n for klass, id_card_group in id_card_groups.items()\n }\n\n # Split into pages.\n page_size = self.PageSize[0] * self.PageSize[1]\n self.page_list_groups = {\n klass: [\n card_id_group[i * page_size: (i + 1) * page_size]\n for i in range((len(card_id_group) + page_size - 1) // page_size)\n ]\n for klass, card_id_group in card_id_groups.items() if card_id_group\n }\n\n # Get the first available klass. Try old klass id at first.\n klass_order = 0 if self.klass_id is None else self.KlassOrder[self.klass_id]\n if self.KlassOrderR[klass_order] not in self.page_list_groups:\n # If current klass is empty, search start from 0.\n klass_order = 0\n while klass_order < len(self.KlassOrderR) and self.KlassOrderR[klass_order] not in self.page_list_groups:\n klass_order += 1\n if klass_order == len(self.KlassOrderR):\n # The result page is empty.\n new_klass_id = None\n else:\n new_klass_id = self.KlassOrderR[klass_order]\n self._refresh_klass_icons()\n self.set_klass_id(new_klass_id, page_to_0=True)", "def page9(self):\n result = request901.GET('/Cars_Sample_App/car.do' +\n '?query=' +\n self.token_query +\n '&cid=' +\n self.token_cid)\n\n return result", "def get_stock():\n offset = int(request.args.get('offset', 0))\n if offset < 0 or not isinstance(offset, int) or isinstance(offset, bool): # isinstance(True, int) == True...\n abort(400)\n stock = db_interface.get_stock()\n encoded_url = request.url.replace(' ', '%20') # Replace all spaces in the URL string (why are they even there?)\n next_offset = offset + min(PAGINATION_LIMIT, len(stock[offset:])) # Find the next offset value\n if offset == 0:\n # Append the offset value to the URL string\n if len(stock[next_offset:]) == 0:\n next_url = None\n else:\n next_url = '%s?offset=%s' % (encoded_url, next_offset)\n prev_url = None\n else:\n # Replace the offset value in the URL string\n if len(stock[next_offset:]) == 0:\n next_url = None\n else:\n next_url = re.sub(r'offset=\\d+', 'offset=%s' % next_offset, encoded_url)\n\n if offset-PAGINATION_LIMIT <= 0:\n prev_url = re.sub(r'&offset=\\d+', '', encoded_url)\n print prev_url, encoded_url\n if prev_url == encoded_url:\n prev_url = re.sub(r'\\?offset=\\d+', '', encoded_url)\n else:\n prev_url = re.sub(r'offset=\\d+', '&offset=%s' % (offset-PAGINATION_LIMIT), encoded_url)\n meta = {'count': len(stock[offset:next_offset]),\n 'offset': offset,\n 'total_count': len(stock),\n 'next': next_url,\n 'previous': prev_url\n }\n resp = Response(json.dumps({'stock': stock[offset:next_offset], 'meta': meta}, indent=4), 
content_type='application/json; charset=utf8')\n return resp", "def cards():\n if user_loggined():\n user = models.User.query.get(session['user_id'])\n u_cards = user.cards.all()\n prep_cards = []\n for card in u_cards:\n prep_cards.append(card.type + ' **** '+card.cnb[-9:])\n else:\n return redirect(url_for('index'))\n return redirect(url_for('index'))", "def page10(self):\n result = request1001.GET('/Cars_Sample_App/search.do', None,\n ( NVPair('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),\n NVPair('Referer', 'http://supercars-tomcat:8080/Cars_Sample_App/car.do?query=carEnquiries&cid=2'), ))\n\n return result", "def get_cards(self):\n card = self._starting_card\n return card", "def page26(self):\n self.token_mid = \\\n '1'\n result = request2601.GET('/Cars_Sample_App/cars.do' +\n '?query=' +\n self.token_query +\n '&mid=' +\n self.token_mid)\n\n return result", "def get_all(self, start=0, count=-1, filter='', sort=''):\n return self._client.get_all(start=start, count=count, filter=filter, sort=sort)", "def old_start_to_scrape_stocks():\n # the way it works is: 20 stocks are displayed per page, and the r= paramater in the url tells where to start listing with the stocks\n res = req.get(stocks_url.format('1'), headers={'user-agent': ua.random})\n soup = bs(res.content, 'lxml')\n # get last page number to get pages that need to be iterated through\n last_page_num = int(soup.findAll('a', {'class': 'screener-pages'})[-1].text)\n # the last page should be the (last page number - 1) * 20 + 1\n last_r = (last_page_num - 1) * 20 + 1 + 1 # add another one for range to work\n for p in range(21, last_r, 20):\n pass", "def get_records(field_id):\n if not request.is_xhr:\n abort(403)\n\n if field_id == 0:\n field_id = session.get('current_field_id', 2)\n\n field = Field.query.get(field_id)\n records = field.records.limit(10)\n top_10 = []\n for record in records:\n is_you = False\n current_player = session.get('player', '')\n if current_player == record.player.name:\n is_you = True\n top_10.append(\n {\n 'size': field.name,\n 'player': record.player.name,\n 'shot_count': record.shot_count,\n 'seconds': record.seconds,\n 'isYou': is_you,\n }\n )\n\n if not top_10:\n top_10 = [{'size': field.name},]\n\n return jsonify(top_10)", "def get_full_container_list(container_name, **kwargs):\n limit = 10000\n kwargs['limit'] = limit\n page = []\n seed = []\n _, page = get_conn().get_container(container_name, **kwargs)\n seed.extend(page)\n\n while len(page) == limit:\n # keep getting pages..\n kwargs['marker'] = seed[-1]['name']\n _, page = get_conn().get_container(container_name, **kwargs)\n seed.extend(page)\n\n return seed", "def getJobcards(request):\n if request.method == 'GET':\n jcEnd=request.GET.get('jobend', '')\n jcContains=request.GET.get('vcode', '')\n ptid=request.GET.get('ptid', '')\n limit=request.GET.get('limit', '')\n if limit == '':\n limit=50\n else:\n limit=int(limit)\n if jcContains == '':\n jobcards = WorkDetail.objects.filter(worker__jobcard__panchayat__id = ptid, worker__jobcard__jobcard__endswith = jcEnd).values(\"worker__jobcard__jobcard\").annotate(totalTrans = Count('pk'), jobcard = F('worker__jobcard__jobcard'), headOfHousehold = F('worker__jobcard__headOfHousehold'))\n else:\n jobcards = WorkDetail.objects.filter(worker__jobcard__panchayat__id = ptid, worker__jobcard__jobcard__endswith = jcEnd, worker__jobcard__jobcard__icontains = jcContains).values(\"worker__jobcard__jobcard\").annotate(totalTrans = Count('pk'), jobcard = 
F('worker__jobcard__jobcard'), headOfHousehold = F('worker__jobcard__headOfHousehold'))\n\n jobcards = jobcards[:limit]\n serializer = JobcardSerializer2(jobcards, many=True)\n return JsonResponse(serializer.data, safe=False)", "def get_card(name_str, page=1):\r\n payload = {'name': name_str, 'page': page}\r\n response = query('https://api.magicthegathering.io/v1/cards', payload)\r\n return response.json()", "def get_all_cards(self, filter='open'):\n print('Searching Trello cards..\\n')\n done_sources = []\n for list in self.my_lists:\n for card in list.list_cards(card_filter=filter):\n name = card.name.split()[0]\n done_sources.append(card)\n return done_sources", "def list(self, request, *args, **kwargs):\n\n queryset = self.filter_queryset(self.get_queryset())\n\n page = request.query_params.get('page', 1)\n paginator = Paginator(queryset, 8)\n\n try:\n queryset = paginator.page(page)\n\n except PageNotAnInteger:\n queryset = paginator.page(1)\n\n except EmptyPage:\n queryset = paginator.page(paginator.num_pages)\n\n page = int(page)\n\n serializer = self.get_serializer(queryset, many=True)\n return Response({'items': serializer.data, 'page': page, 'pages': paginator.num_pages})", "def get_top(n):\n \n coins = []\n coin_count = 0\n page = 1\n while coin_count < n:\n data = json.loads(requests.get(URL_TOP_COINS.format(page)).text)\n for coin in data:\n coins.append({\"gecko_id\": coin['id'], 'symbol': coin['symbol'].upper(), 'logo':coin['image']})\n page += 1\n coin_count += len(data)\n sleep(0.3)\n return coins[:n]", "def test_consumed_cards(self):\n game = TestGames.replay(9, [3, 1, 0, 0])\n consumed_cards = game.consumed_cards()\n self.assertEqual(len(consumed_cards), 8)\n\n self.assertListEqual(list(consumed_cards),\n [2 / 5, # guards\n 0 / 2, # priest\n 1 / 2, # baron\n 0 / 2, # handmaid\n 1 / 2, # prince\n 0 / 1, # king\n 0 / 1, # countess\n 0 / 1]) # princess", "def page24(self):\n self.token_mid = \\\n '7'\n result = request2401.GET('/Cars_Sample_App/cars.do' +\n '?query=' +\n self.token_query +\n '&mid=' +\n self.token_mid)\n self.token_query = \\\n httpUtilities.valueFromBodyURI('query') # 'car'\n # 6 different values for token_cid found in response, using the first one.\n self.token_cid = \\\n httpUtilities.valueFromBodyURI('cid') # '20'\n\n return result", "def fetchBatchAccounts(config, start, limit): \n config['params']['from'] = start\n config['params']['limit'] = limit\n url = config['domain']\n r = requests.get(url, headers=config['headers'], params=config['params']).json()\n print(\"Downloading From: \", config['params']['from'], ' To: ', config['params']['from'] + config['params']['limit'], '| Limit: ', config['params']['limit'])\n return r", "def listings(self, b_start=None, b_size=None):\n if b_size == None:\n b_size = self.batch_size\n if b_start == None:\n b_start = (getattr(self, 'page', 1) - 1) * b_size\n if self.context.portal_type == 'Folder':\n content_filter = {\n 'b_start': b_start,\n 'b_size': b_size,\n 'portal_type': 'Event',\n 'sort_on': 'start',\n 'sort_order': 'ascending',\n 'review_state': 'published',\n 'start': {'query': DateTime(), 'range': 'min'},\n }\n items = self.context.getFolderContents(\n content_filter, batch=True\n )\n elif self.context.portal_type == 'Topic':\n if b_start and not self.request.get('b_start'):\n self.request['b_start'] = b_start\n items = self.context.queryCatalog(self.request, True, b_size)\n elif self.context.portal_type == 'Collection':\n items = self.context.results(True, b_start=b_start, b_size=b_size)\n else:\n items = 
[]\n return items", "def get_all_open_cards(self):\n print('Searching Trello cards..\\n')\n done_sources = []\n for list in self.my_lists:\n for card in list.list_cards(card_filter='open'):\n name = card.name.split()[0]\n done_sources.append(card)\n return done_sources", "def page8(self):\n result = request801.GET('/Cars_Sample_App/car.do' +\n '?query=' +\n self.token_query +\n '&cid=' +\n self.token_cid)\n self.token_carName = \\\n httpUtilities.valueFromBodyURI('carName') # 'S'\n self.token_query = \\\n httpUtilities.valueFromBodyURI('query') # 'carEnquiries'\n\n return result", "def top_playlists():\n page = request.args.get('page', 1, type=int)\n playlists = PlaylistModel.query.filter(PlaylistModel.import_count > 0).order_by(PlaylistModel.import_count.desc()).paginate(\n page=page, per_page=8, error_out=False\n )\n\n return render_template(\n 'top.html',\n playlists=playlists.items,\n pagination=playlists, # this is a pagination object,\n active='most-imported',\n )", "def start():\n mongo_collection = mongo_database[\"questions\"]\n all_cards = mongo_collection.find({\"visible\": \"Yes\"})\n objects = []\n for object in all_cards:\n objects.append(object)\n random.shuffle(objects)\n return render_template(\"start.html\", cards=objects)", "def list_resources(self, start_response, offset, prop, obj, obj_offset):\n limit = 20\n has_more, results = self.backend.list_resources(\n offset, limit, prop, obj)\n template = open(resolve(\"html/list.html\")).read()\n if offset > 0:\n has_prev = \"\"\n else:\n has_prev = \"disabled\"\n prev = max(offset - limit, 0)\n if has_more:\n has_next = \"\"\n else:\n has_next = \"disabled\"\n nxt = offset + limit\n pages = \"%d - %d\" % (offset + 1, offset + min(limit, len(results)))\n facets = []\n for facet in FACETS:\n if \"list\" not in facet or facet[\"list\"] is True:\n facet['uri_enc'] = quote_plus(facet['uri'])\n if (\"<%s>\" % facet['uri']) != prop:\n facets.append(facet)\n else:\n facet = copy(facet)\n mv, val_results = self.backend.list_values(obj_offset, 20,\n prop)\n facet['values'] = [{\n 'prop_uri': facet['uri_enc'],\n 'value_enc': quote_plus(v['link']),\n 'value': v['label'][:100],\n 'count': v['count'],\n 'offset': obj_offset} for v in val_results]\n if mv:\n facet['more_values'] = obj_offset + 20\n facets.append(facet)\n\n start_response(\n '200 OK', [('Content-type', 'text/html; charset=utf-8')])\n query = \"\"\n if prop:\n query += \"&prop=\" + quote_plus(prop[1:-1])\n if obj:\n query += \"&obj=\" + quote_plus(obj)\n if obj_offset:\n query += \"&obj_offset=\" + obj_offset\n\n results2 = [{\n \"title\": r[\"label\"],\n \"link\": r[\"link\"],\n \"model\": from_model(\n self.backend.summarize(r[\"id\"]),\n BASE_NAME + r[\"id\"])}\n for r in results]\n mres = pystache.render(template, {\n 'facets': facets,\n 'results': results2,\n 'has_prev': has_prev,\n 'prev': prev,\n 'has_next': has_next,\n 'next': nxt,\n 'pages': pages,\n 'query': query,\n 'context': CONTEXT})\n return [self.render_html(DISPLAY_NAME, mres).encode('utf-8')]", "def get_cards():\n with open(\"mashape_key.txt\", \"r\") as mashape_key:\n api_key = mashape_key.read()\n print(api_key)\n url = \"https://omgvamp-hearthstone-v1.p.mashape.com/cards?collectible=1\"\n headers = {\"X-Mashape-Key\": api_key}\n response = requests.get(url, headers=headers)\n cards = json.loads(response.text)\n return cards", "def get_all(self, start=0, count=-1, filter='', query='', sort=''):\n return self._client.get_all(start, count, filter=filter, sort=sort, query=query)" ]
[ "0.5929701", "0.56348145", "0.5483227", "0.52740085", "0.5245021", "0.5158718", "0.51479316", "0.5110173", "0.5082926", "0.5076762", "0.5074356", "0.5069807", "0.50454044", "0.5042", "0.50204206", "0.5005391", "0.50021154", "0.50005645", "0.49998033", "0.4996675", "0.49888542", "0.49830133", "0.49668285", "0.495767", "0.49387127", "0.49366817", "0.49277282", "0.4923681", "0.4923474", "0.49195915" ]
0.822305
0
right-pad a string with zeros to the given length
def _rzfill(string, to_len): if len(string) > to_len: raise ValueError("string is already longer than to_len") return string + '0' * (to_len - len(string))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def r_pad(arg, length):\n if length <= len(arg):\n return arg\n else:\n return arg + \" \" * (length - len(arg))", "def rightpad(field, length):\r\n field = str(field)\r\n field_length = len(field)\r\n if field_length>length:\r\n field = field[:length]\r\n if field_length<length:\r\n while len(field)<length:\r\n field+=' '\r\n return field.upper()", "def _padright(width, s):\n fmt = \"{0:<%ds}\" % width\n return fmt.format(s)", "def rjust(value, length):\n\n if value is None or value == '':\n value = '0'\n else:\n value = str(value)\n value = value.rjust(length, '0')\n return value", "def padding_zeroes(number, length_string):\n return str(number).zfill(length_string)", "def pad_right(data, padding_char, length):\n if is_null_or_empty(padding_char):\n padding_char = DEFAULT_PAD_CHAR\n\n string_buf = data\n for the_char in itertools.repeat(padding_char, length):\n string_buf += the_char\n\n return string_buf", "def right_pad(message, pad_to=20, pad_with=' '):\n message = str(message)\n while len(message) < pad_to:\n message = message + pad_with\n return message", "def pad_to_len(string, length, leftpad=False, pad_char=None):\n pad_len = length - len(string)\n if not pad_char:\n pad_char = chr(pad_len)\n pad = pad_char * pad_len\n return pad + string if leftpad else string + pad", "def str_padding(length, val):\n return '{0:<{fill}}'.format(val, fill=length)", "def pad(number, width=0):\n return str(number).zfill(width)", "def pad_number(number, length):\n\n string_number = str(number)\n number_of_zeros = length - len(string_number)\n if number_of_zeros >= 0:\n return \"0\" * number_of_zeros + string_number\n else:\n return string_number", "def pad(value, digits, to_right=False):\n len_val = len(value)\n assert len_val <= digits\n rem_digits = digits - len_val\n if to_right:\n return value + \"0\"*rem_digits\n else:\n return \"0\"*rem_digits + value", "def len_unpadded(self) -> int:", "def pad_str_left(string, length: int, add: str) -> str:\n out_string = string\n while len(out_string) < length:\n out_string = add + out_string\n return out_string", "def filter_pad(val: Union[int, str], width: int, fillchar: str = '0') -> str:\n return str(val).rjust(width, fillchar)", "def _padboth(width, s):\n fmt = \"{0:^%ds}\" % width\n return fmt.format(s)", "def _pad_shorter(sequence: str) -> str:\n return sequence.ljust(3, \"X\")", "def left_zero_pad(s, blocksize):\n if blocksize > 0 and len(s) % blocksize:\n s = (blocksize - len(s) % blocksize) * b('\\000') + s\n return s", "def un_pad(s):\n return s[0:-ord(s[-1])]", "def pad(s):\n return s + (16 - len(s) % 16) * chr(16 - len(s) % 16)", "def pad_sequence(sequence, max_length, pad):\n padN = max(max_length - len(sequence), 0)\n result = sequence[:max_length - padN] + [pad] * padN\n return result", "def pad_end(self, my_str, c, final_length):\n while len(my_str) != final_length:\n my_str = my_str + c\n return my_str", "def unpad(plain):\n return plain[:-ord(plain[-1])]", "def zeroPad(numberString, zeros, left = True):\n for i in range(zeros):\n if left:\n numberString = '0' + numberString\n else:\n numberString = numberString + '0'\n return numberString", "def _padleft(width, s):\n fmt = \"{0:>%ds}\" % width\n return fmt.format(s)", "def ljust(value, length):\n\n if value is None:\n value = ''\n else:\n value = str(value)\n value = value.ljust(length, ' ')\n return value", "def left_fill(s, n, x=\"0\"):\n sl = len(s)\n zn = n - sl\n if zn > 0:\n return zn*\"0\" + s\n else:\n return s", "def int_padding(length, val, direction=\">\"):\n return 
'{0:0{direction}{fill}}'.format(val, direction=direction, fill=length)", "def pad_left(data, padding_char, length):\n if is_null_or_empty(padding_char):\n padding_char = DEFAULT_PAD_CHAR\n\n string_buf = EMPTY\n for the_char in itertools.repeat(padding_char, length):\n string_buf += the_char\n\n string_buf += data\n return string_buf", "def pad(plain, size):\n offset = size - (len(plain) % size)\n return plain + chr(offset) * offset" ]
[ "0.8048082", "0.8042724", "0.8026424", "0.78363734", "0.78341615", "0.76930463", "0.7581309", "0.74894637", "0.73814124", "0.7381266", "0.7312482", "0.7301371", "0.72033757", "0.71582454", "0.7100653", "0.70349735", "0.7018298", "0.70045847", "0.6971008", "0.6954787", "0.6918977", "0.68289155", "0.6827072", "0.68180025", "0.6796567", "0.6783012", "0.6736561", "0.67210275", "0.66609657", "0.6620362" ]
0.8118524
0
Tries to detect a plugged-in YubiKey, else alerts the user
def detect_yubikey(self): try: self.yk = yubico.find_yubikey() self.version.set("Version:%s" % self.yk.version()) self.serial.set("Serial:%s" % self.yk.serial()) except yubico.yubikey.YubiKeyError: self.version.set("No YubiKey detected") self.serial.set("") self.yk = None except yubico.yubikey_usb_hid.usb.USBError as e: self.version.set("No YubiKey detected") self.serial.set("") self.user_message.set( "A USB error occurred:%s - do you have permission to access USB devices?", e.message )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def detect_infrared():\n try:\n count = 0\n while True:\n if GPIO.input(PIN_NO) == True:\n count += 1\n print('[+] Detected ' + str(count))\n output_sound()\n send_message()\n time.sleep(2)\n except Exception as e:\n GPIO.cleanup()", "def _auth_plugin_available(ext):\n return ext.obj.available", "def CheckPluggedIn(self):\n if self.wired.wired_interface and self.wired.wired_interface != \"None\":\n return self.wired.CheckPluggedIn()\n else:\n return None", "def is_haiku():\n pass", "def test_validate_yubikey(self):\n from_key = self.yk_rnd.from_key(self.yk_public_id, self.yk_key)\n self.assertTrue(pyhsm.yubikey.validate_yubikey_with_aead( \\\n self.hsm, from_key, self.aead.data, self.kh_validate))", "def verify(serial, udp):\n\n if udp:\n solo.fido2.force_udp_backend()\n\n # Any longer and this needs to go in a submodule\n print(\"Please press the button on your Solo key\")\n try:\n cert = solo.client.find(serial).make_credential()\n except Fido2ClientError:\n print(\"Error getting credential, is your key in bootloader mode?\")\n print(\"Try: `solo program aux leave-bootloader`\")\n sys.exit(1)\n\n solo_fingerprint = b\"r\\xd5\\x831&\\xac\\xfc\\xe9\\xa8\\xe8&`\\x18\\xe6AI4\\xc8\\xbeJ\\xb8h_\\x91\\xb0\\x99!\\x13\\xbb\\xd42\\x95\"\n hacker_fingerprint = b\"\\xd0ml\\xcb\\xda}\\xe5j\\x16'\\xc2\\xa7\\x89\\x9c5\\xa2\\xa3\\x16\\xc8Q\\xb3j\\xd8\\xed~\\xd7\\x84y\\xbbx~\\xf7\"\n udp_fingerprint = b\"\\x05\\x92\\xe1\\xb2\\xba\\x8ea\\rb\\x9a\\x9b\\xc0\\x15\\x19~J\\xda\\xdc16\\xe0\\xa0\\xa1v\\xd9\\xb5}\\x17\\xa6\\xb8\\x0b8\"\n\n if cert.fingerprint(hashes.SHA256()) == solo_fingerprint:\n print(\"Valid Solo Secure firmware from SoloKeys\")\n elif cert.fingerprint(hashes.SHA256()) == hacker_fingerprint:\n print(\"Valid Solo Hacker firmware\")\n elif cert.fingerprint(hashes.SHA256()) == udp_fingerprint:\n print(\"Local software key\")\n else:\n print(\"Unknown fingerprint! \", cert.fingerprint(hashes.SHA256()))", "def verify(serial, udp):\n\n if udp:\n solo.fido2.force_udp_backend()\n\n # Any longer and this needs to go in a submodule\n print(\"Please press the button on your Solo key\")\n try:\n cert = solo.client.find(serial).make_credential()\n except Fido2ClientError:\n print(\"Error getting credential, is your key in bootloader mode?\")\n print(\"Try: `solo program aux leave-bootloader`\")\n sys.exit(1)\n\n solo_fingerprint = b\"r\\xd5\\x831&\\xac\\xfc\\xe9\\xa8\\xe8&`\\x18\\xe6AI4\\xc8\\xbeJ\\xb8h_\\x91\\xb0\\x99!\\x13\\xbb\\xd42\\x95\"\n hacker_fingerprint = b\"\\xd0ml\\xcb\\xda}\\xe5j\\x16'\\xc2\\xa7\\x89\\x9c5\\xa2\\xa3\\x16\\xc8Q\\xb3j\\xd8\\xed~\\xd7\\x84y\\xbbx~\\xf7\"\n udp_fingerprint = b\"\\x05\\x92\\xe1\\xb2\\xba\\x8ea\\rb\\x9a\\x9b\\xc0\\x15\\x19~J\\xda\\xdc16\\xe0\\xa0\\xa1v\\xd9\\xb5}\\x17\\xa6\\xb8\\x0b8\"\n\n if cert.fingerprint(hashes.SHA256()) == solo_fingerprint:\n print(\"Valid Solo Secure firmware from SoloKeys\")\n elif cert.fingerprint(hashes.SHA256()) == hacker_fingerprint:\n print(\"Valid Solo Hacker firmware\")\n elif cert.fingerprint(hashes.SHA256()) == udp_fingerprint:\n print(\"Local software key\")\n else:\n print(\"Unknown fingerprint! 
\", cert.fingerprint(hashes.SHA256()))", "def authenticate_bluetooth(self):\n data = self.blu.main()\n if bool(data) == True:\n authentication = self.client.validate_mac(\n data[\"mac_address\"], data[\"email\"]).decode(\"utf-8\")\n if authentication == \"valid\":\n self.current_email = data[\"email\"]\n self.unlock_time = round(datetime.now().timestamp())\n self.display_successful_unlock_eng()\n elif authentication == \"invalid\":\n print(self.INVALID_USER)\n time.sleep(3)\n self.display_main()\n else:\n self.display_eng()", "def is_available():", "def get_hypixel_key(self):\n key = self.bot_data_file[\"apiKeys\"][\"hypixel\"]\n if self.check_empty_key(key):\n return key\n else:\n print(\"ERROR GETTING THE HYPIXEL KEY (get yours from https://api.hypixel.net/) - ABORTING\")\n quit(1)", "def check():\n hokusai.check()", "def _try_connect(self, user_input: dict[str, Any]) -> str | None:\n try:\n smartplug = SmartPlug(\n user_input.get(CONF_HOST, self.ip_address),\n user_input[CONF_PASSWORD],\n user_input[CONF_USERNAME],\n user_input[CONF_USE_LEGACY_PROTOCOL],\n )\n except Exception as ex: # pylint: disable=broad-except\n _LOGGER.exception(\"Unexpected exception: %s\", ex)\n return \"unknown\"\n if not smartplug.authenticated and smartplug.use_legacy_protocol:\n return \"cannot_connect\"\n return None", "def can_mi():\n pass", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def test_guest_os(self):\n self.check_guest_os()", "def is_valid_yubikey_format(otp: str) -> bool:\n\n return ALPHABET_RE.match(otp) and True or False" ]
[ "0.5550443", "0.54399776", "0.5384098", "0.5384029", "0.535033", "0.5321742", "0.5321742", "0.527354", "0.5258836", "0.52223456", "0.5207884", "0.5207635", "0.5153576", "0.5151934", "0.5151934", "0.5151934", "0.5151934", "0.5151934", "0.5151934", "0.5151934", "0.5151934", "0.5151934", "0.5151934", "0.5151934", "0.5151934", "0.5151934", "0.5151934", "0.5151934", "0.5151934", "0.5095663" ]
0.7784908
0
Display the Entry text value.
def displayText(self): if self.entryWidget.get().strip() == "": tkMessageBox.showerror("Tkinter Entry Widget", "Enter a text value") else: self.file_com.write(self.entryWidget.get().strip()+'\n')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_entry(text):\n print \"Text entered: \\n '%s'\" % text", "def value (self):\r\n return self.entry.get()", "def display_entry(self, entry):\n border = '-' * 50\n print(border)\n print('Employee: {}'.format(entry.employee_name))\n print('Task Name: {}'.format(entry.task_name))\n print(\"Date: {}\".format(entry.date))\n print(\"Time Spent: {}\".format(entry.time_spent))\n if entry.notes != '':\n print(\"Notes:\\n{}\\n{}\".format('----------', entry.notes))\n print(border)", "def textentry(self, parent, variable, label):\n # pack a label and entry horizontally in a frame:\n f = Frame(parent)\n f.pack(side='top', padx=2, pady=2)\n l = Label(f, text=label)\n l.pack(side='left')\n widget = Entry(f, textvariable=variable, width=8)\n widget.pack(side='left', anchor='w')\n return widget", "def value(self):\n return str(self.input.currentText())", "def value(self):\n return str(self.input.text())", "def getText():", "def getText():", "def getText():", "def getText():", "def getText():", "def retrieve_input():\r\n inputValue = simpleText.get(\"1.0\",\"end-1c\") #Our Variable\r\n #\"1.0\" = start from first character in the text widget\r\n #\"end-1c = delete the last character that Text creates every time\"\r\n return inputValue", "def getValue(self):\n return self.field.currentText()", "def on_text_box(self, event):\n text_box_value = self.text_box.GetValue()\n text = \"\".join([_(u\"New text box value: \"), text_box_value])\n if self.state == 0:\n self.canvas_2d.render(text)\n else:\n self.canvas_3d.render()", "def messageEntry(self,message,default=''):\n dlg = wx.TextEntryDialog(self, message,self.app.title, default)\n if dlg.ShowModal() == wx.ID_OK:value=dlg.GetValue()\n else:value=None\n dlg.Destroy()\n return value", "def render(self, **kwargs):\r\n return h.text_field(self.name, value=self.value, **kwargs)", "def do_display(self, arg):\n try:\n value = self._getval_or_undefined(arg)\n except:\n return\n self._get_display_list()[arg] = value", "def getValue(self):\n return self.field.text()", "def getText(self):", "def text(self):\n return self.label.text()", "def storeTextEditValue(self):\n\n\t\tcategory, attr = self.getWidgetMeta(self.sender())\n\t\tvalue = self.sender().toPlainText()\n\t\tself.storeValue(category, attr, value)", "def text(self):\n if hasattr(self,'label'):\n return str(self.label.text())\n else:\n return self.key", "def display_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"display_value\")", "def display_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"display_value\")", "def entry(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"entry\")", "def setValue(self,val):\n self.input.setText(str(val))", "def editText(self, item):\n storedText = item.data.get(self.name, '')\n result = self.formatEditText(storedText)\n if self.isRequired and not result[0]:\n return (result[0], False)\n return result", "def askText(parent,message,title='',default=''):\r\n dialog = wx.TextEntryDialog(parent,message,title,default)\r\n if dialog.ShowModal() != wx.ID_OK:\r\n dialog.Destroy()\r\n return None\r\n else:\r\n value = dialog.GetValue()\r\n dialog.Destroy()\r\n return value", "def entry(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"entry\")", "def evaluate(self, event):\n self.res.configure(text=\"Result: \" + str(eval(expression_converter(self.entry.get()))))" ]
[ "0.6926207", "0.6598252", "0.6590535", "0.65623057", "0.6552865", "0.6468197", "0.64450836", "0.64450836", "0.64450836", "0.64450836", "0.64450836", "0.6352432", "0.6344944", "0.6339035", "0.6331184", "0.6292854", "0.62803704", "0.6258875", "0.62545305", "0.62507486", "0.6148498", "0.6136056", "0.6134113", "0.6134113", "0.6063544", "0.6041873", "0.60397816", "0.60347724", "0.6032261", "0.60275084" ]
0.76484877
0
Create and configure the Connexion app.
def create_app(env): connexion_app = connexion.App(__name__, specification_dir='openapi/', options={'swagger_url': '/swagger'}) app = connexion_app.app env_config_class_map = { 'prod': 'config.Prod', 'testing': 'config.Testing', 'dev': 'config.Dev' } config_class = env_config_class_map.get(env) app.config.from_object(config_class) print(app.config) app.redis = Redis.from_url(app.config['REDIS_URI']) app.default_task_queue = rq.Queue('default', connection=app.redis, ttl=-1) with app.app_context(): import config as flask_config app.after_request(flask_config.request_logger) app.register_blueprint(rq_dashboard.blueprint, url_prefix='/rq') connexion_app.add_api('spec.yaml') return connexion_app
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_app(self):\r\n self.app = Flask(__name__, instance_relative_config=True)\r\n\r\n # Init the secret key of the app -it is a must for flask to run\r\n self.app.config.from_mapping(\r\n SECRET_KEY='!ZNeverSayNever116Z!',\r\n MONGODB_SETTINGS= {'host': 'mongodb://localhost/opc_integrity'}\r\n )\r\n initialize_db(self.app)\r\n\r\n\r\n # Init the app with core routes\r\n routes.init_app(self.app)", "def create_app(config: dict) -> Flask:\n for key, value in config.items():\n app.config[key] = value\n db.init_app(app)\n ma.init_app(app)\n app.app_context().push()\n return app", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n app.register_blueprint(auth_bp, url_prefix='/auth')\n app.register_blueprint(errors_bp, url_prefix='/error')\n app.config.from_object('config.Config')\n\n db.init_app(app)\n store.bind(db)\n login_manager.init_app(app)\n Session(app)\n captcha = FlaskSessionCaptcha(app)\n captcha.init_app(app)\n\n\n with app.app_context():\n from . import routes # Import routes\n db.create_all() # Create sql tables for our data models\n\n return app", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n app.config.from_object('config.Config')\n db.init_app(app)\n flask_bcrypt.init_app(app)\n jwt.init_app(app)\n\n with app.app_context():\n # Import Blueprints\n from .routes.users_route import users_bp\n from .routes.messages_route import messages_bp\n\n # REGISTER ROUTES\n app.register_blueprint(users_bp, url_prefix=\"/users\")\n app.register_blueprint(messages_bp, url_prefix=\"/messages\")\n\n\n return app", "def create_app():\n app = Flask( __name__ , instance_relative_config=False)\n #: Get MySQL config from config.py\n app.config.from_pyfile('config.py')\n mysql.init_app(app)\n with app.app_context():\n return app", "def app():\n return create_app()", "def create_app(config_name='development'):\n\tapp = Flask(__name__,instance_relative_config=True)\n\tapp.config.from_object(APP_CONFIG[config_name])\n\n\turl = app.config.get('DATABASE_URL')\n\turl = app.config.get('DATABASE_URL')\n\tCORS(app, resources={r\"/api/*\": {\"origins\": \"*\"}})\n\n\tapp.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER\n\n\tcreate_tables(url)\n\tpublic_id = str(uuid.uuid4())\n\tif config_name == 'testing':\n\t\tpublic_id = \"f3b8a1c3-f775-49e1-991c-5bfb963eb419\"\n\tcreate_super_user(url, public_id)\n\n\tapp.register_error_handler(404, url_not_found)\n\tapp.url_map.strict_slashes = False\n\n\n\tapp.register_blueprint(v1)\n\tapp.register_blueprint(v2)\n\treturn app", "def create_app():\n app = Flask(__name__)\n app.config.from_object('app.configs.config')\n app.config.from_object('app.configs.settings')\n return app", "def create_app():\n app = FastAPI()\n configure_rest_server(app=app, router_configs=WEB_SERVICES_ROUTER_CONFIGS, db_configs=DB_CONFIGS)\n return app", "def create_app():\n app = Flask(__name__)\n\n app.config.from_pyfile('../settings.py')\n\n app.register_blueprint(layout_bp, url_prefix='/layouts')\n app.register_blueprint(sheet_bp, url_prefix='/sheets')\n app.register_blueprint(user_bp, url_prefix='/users')\n\n db.init_app(app)\n ma.init_app(app)\n migrate.init_app(app)\n login_manager.init_app(app)\n\n return app", "def create_app():\n logging.basicConfig(\n level=REANA_LOG_LEVEL,\n format=REANA_LOG_FORMAT\n )\n app = Flask(__name__)\n app.config.from_object('reana_server.config')\n app.secret_key = \"hyper secret key\"\n\n # Register API routes\n from .rest import ping, secrets, users, workflows # noqa\n 
app.register_blueprint(ping.blueprint, url_prefix='/api')\n app.register_blueprint(workflows.blueprint, url_prefix='/api')\n app.register_blueprint(users.blueprint, url_prefix='/api')\n app.register_blueprint(secrets.blueprint, url_prefix='/api')\n\n app.session = Session\n CORS(app)\n return app", "def create_app(config_path: str):\n\n if not os.path.exists(config_path):\n raise OSError(f\"Configuration file {config_path} does not exist\")\n\n # create flask app\n app = Flask(__name__)\n\n # add app configration \n app.config.from_pyfile(config_path)\n\n # initialize database \n db.init_app(app)\n logger.info(f\"Initializing app with database from {app.config['SQLALCHEMY_DATABASE_URI']}\")\n\n # initialize api enpoints\n from deekoo_auth.endpoints import api_endpoints\n app.register_blueprint(api_endpoints)\n\n return app", "def create_app(config_name=\"development\"):\n # return app with config file on config folder\n app = Flask(__name__)\n\n # get default settings for app\n app.config.from_object(\"app_name.settings\")\n\n # load according config object\n app.config.from_object(app_config.config[config_name])\n\n # run classmethod to init app with Flask-DotEnv\n app_config.config[config_name].init_app(app)\n\n # register blueprints\n app.register_blueprint(api_mod, url_prefix=\"/api\")\n app.register_blueprint(mock_module, url_prefix=\"/api\")\n app.register_blueprint(support_ticket_module, url_prefix=\"/api\")\n \n # enable cors\n CORS(app)\n\n with app.app_context():\n # if config_name != \"testing\":\n # init db instance\n db.init_app(app)\n\n # migrate for Flask-Migrate\n migrate.init_app(app, db)\n\n return app", "def create_app(self):\n app = Flask(__name__)\n\n app.config[\"auth_func\"] = self.auth_func\n app.config[\"hydrator_func\"] = self.hydrator_func\n app.config[\"request_hydrator_func\"] = self.request_hydrator_func\n app.config[\"database_uri\"] = self.database_uri\n app.config[\"hmac_secret\"] = self.hmac_secret\n\n cors = CORS()\n cors.init_app(app, resources={r\"/*\": {\"origins\": self.cors_origins, \"supports_credentials\": True}})\n\n app.register_blueprint(api_v0.bp)\n\n @app.route(\"/\")\n def health_check():\n \"\"\"Can be called by e.g. 
Kubernetes to verify that the API is up\n\n Returns:\n str: the static string \"Comet-API\", could be anything\n \"\"\"\n return \"Comet-API\"\n\n return app", "def app():\n app = create_app()\n return app", "def create_app(test_config=None):\n app = Flask(__name__, instance_relative_config=True)\n app.config.from_mapping(\n SECRET_KEY=os.environ.get('FLASK_SECRET_KEY', 'dev'),\n # SQLALCHEMY_DATABASE_URI='sqlite:////' + os.path.join(app.instance_path, 'app.sqlite'),\n SQLALCHEMY_DATABASE_URI=os.environ.get('FLASK_SQLALCHEMY_DATABASE_URI'),\n SQLALCHEMY_TRACK_MODIFICATIONS=False,\n )\n\n if test_config is None:\n app.config.from_pyfile('config.py', silent=True)\n else:\n app.config.from_mapping(test_config)\n\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n\n # Set custom json encoder\n app.json_encoder = JSONEncoder\n\n # SQLAlchemy\n from tuinbouwer_server_api.models import db, migrate\n db.init_app(app)\n migrate.init_app(app, db)\n\n # Apscheduler\n from tuinbouwer_server_api.scheduler import scheduler, start_jobs\n scheduler.init_app(app)\n scheduler.start()\n start_jobs()\n \n # CORS\n CORS(app, resources={r'/*': {'origins': '*'}})\n\n # Website\n app.register_blueprint(website.frontend.blueprint)\n \n # API\n app.register_blueprint(api.sensor.blueprint)\n app.register_blueprint(api.frontend.blueprint)\n\n\n return app", "def create_app():\n app = Flask(__name__)\n\n # Load application settings\n settings = os.environ.get(\"FLASK_SETTINGS\", SETTINGS)\n if settings is not None:\n c = Config(settings)\n print(c)\n app.config.update(c.get_map('flask'))\n\n from users.views import user\n # Register the blueprints to app\n app.register_blueprint(user)\n\n db.init_app(app)\n\n return app", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n app.config.from_object('config.Config')\n db.init_app(app)\n ma.init_app(app)\n migrate = Migrate(app, db)\n\n with app.app_context():\n from . 
import routes\n\n # Create tables for our models\n db.create_all()\n app.logger.info(\"application started\")\n\n return app", "def create_app(self):\n app.config.from_object('config.TestingConfig')\n return app", "def create_app(config_name):\n\n app = Flask(__name__)\n app.config.from_object(config_by_name[config_name])\n CORS(app)\n mongo.init_app(app)\n app.register_blueprint(check_bp)\n\n return app", "def create_app():\n\n # Create app\n app = Flask(__name__)\n app.config.from_object(\"nextbus.config.Config\")\n\n app.logger = logger.app_logger\n # Load logging configuration and log initial configuration\n logger.load_config(app)\n\n # Initialise SQLAlchemy and Migrate in app\n db.init_app(app)\n migrate.init_app(app, db)\n\n # Adding app, db and model objects to flask shell\n from nextbus import models\n app.shell_context_processor(\n lambda: {\"app\": app, \"db\": db, \"models\": models}\n )\n\n from nextbus.converters import add_converters\n add_converters(app)\n\n from nextbus.views import page\n from nextbus.resources import api\n app.register_blueprint(page)\n app.register_blueprint(api)\n\n return app", "def create_app(config_name):\n app = Flask(__name__)\n # create app instance\n app.config.from_object(config_by_name[config_name])\n flask_bcrypt.init_app(app)\n\n CORS(app)\n\n routes.init_routes(app)\n\n return app", "def create_app():\n app = Flask(\n __name__,\n instance_relative_config=False,\n )\n app.config.from_object('config.Config')\n\n with app.app_context():\n # CORS\n CORS(app)\n\n # JWT & BCRYPT\n from .utils.auth import init_auth\n init_auth(app)\n\n # DB\n from .utils.db import db\n db.init_app(app)\n\n # Mail\n from .utils.mail.service import mail\n mail.init_app(app)\n app.extensions['mail'].debug = 0 # No logging\n\n # Jobs\n from .utils.scheduler import start_jobs\n start_jobs(app)\n\n # Import routes\n from .routes import (\n admin, users, files,\n suprema,\n b_locals, b_federals)\n\n app.register_blueprint(admin.bp)\n app.register_blueprint(users.bp)\n app.register_blueprint(files.bp)\n app.register_blueprint(suprema.bp)\n app.register_blueprint(b_locals.bp)\n app.register_blueprint(b_federals.bp)\n\n return app", "def create_app():\n app = Flask(__name__, instance_relative_config=False)\n # app.config.from_object('config.Config')\n file_path = os.path.abspath(os.getcwd())+\"/mpulse.db\"\n app.config.from_mapping(\n SECRET_KEY='dev',\n SQLALCHEMY_DATABASE_URI = 'sqlite:///'+file_path,\n SCHEMA=os.path.join(os.path.dirname(__file__), 'schema.sql'),\n SQLALCHEMY_TRACK_MODIFICATIONS = False,\n JSON_SORT_KEYS=False\n )\n\n # ensure the instance folder exists\n try:\n os.makedirs(app.instance_path)\n except OSError:\n pass\n \n # init database\n db.init_app(app)\n \n with app.app_context():\n \n # Create tables if they don't exist\n db.create_all() \n \n # Include our api Routes for members\n from . 
import members\n # Register Blueprints\n app.register_blueprint(members.bp)\n\n return app", "def create_app(config=Config):\r\n # Initialise app and configuration\r\n app = Flask(__name__)\r\n app.config.from_object(config)\r\n\r\n\r\n # Initialise flask plugins\r\n db.init_app(app)\r\n api.init_app(app)\r\n ma.init_app(app)\r\n login.init_app(app)\r\n migrate.init_app(app, db)\r\n register_api(api)\r\n\r\n\r\n return app", "def create_app(self):\n\n app = create_app()\n app.config.from_object('project.config.TestingConfig')\n return app", "def setup_app():\n\n # 1 Create Flask application\n app = Flask(\n import_name=__name__,\n template_folder=\"templates\",\n static_folder=\"static\"\n )\n\n # 2 Update the apps configuration\n app = config_selector(app)\n register_error_handlers(app)\n\n cache.init_app(app)\n\n # 3 Set up logger\n setup_logger(app.config)\n LOGGER.info(\"Set up app & logger.\")\n\n # 4 Init clients\n init_clients(app.config)\n\n # 5 Init Daemon\n start_daemon(app.config)\n\n # 6 Register blueprints\n register_blueprints(app)\n Bootstrap(app)\n\n return app", "def create_app():\n app = Flask(__name__)\n\n # Used by Flask to secure data\n app.config['SECRET_KEY'] = 'super-secret-secure-key'\n # Path to save the Database\n app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'\n\n # Initialize the Database\n db.init_app(app)\n\n # Set up login manager\n from source.models import manage_login\n manage_login(app)\n\n # Blueprint for auth routes\n from source.auth import auth as auth_blueprint\n app.register_blueprint(auth_blueprint)\n\n # Blueprint for non-auth routes\n from source.main import main as main_blueprint\n app.register_blueprint(main_blueprint)\n\n return app", "def init_app(db_name=None):\n\n # create a Flask app, force SSL when debug is False\n app = Flask(__name__, static_folder='./ui/static')\n app.config.from_pyfile('config.py')\n\n # load custom config file\n custom_config = app.config['CUSTOM_CONFIG_PATH']\n if os.environ.get(custom_config) is not None:\n app.config.from_envvar(custom_config)\n\n # setup\n app.db = db.connect(db_name)\n setup_auth(app)\n SSLify(app, subdomains=True)\n\n # register blueprints\n app.register_blueprint(api, url_prefix='/api')\n app.register_blueprint(auth)\n app.register_blueprint(ui)\n\n return app", "def create_app(config_name = None):\n #import pdb; pdb.set_trace()\n app = Flask(__name__, instance_relative_config = False)\n #app = Flask(__name__)\n app.config.from_object('config.DevelopmentConfig')\n\n app.config['SECRET_KEY'] = Config.SECRET_KEY\n app.config['CONFIG_NAME'] = config_name\n\n #load the appropriate configuration\n if config_name == 'dev' or 'test':\n app.config['ENV'] = 'development'\n app.config['TESTING'] = True\n app.config['DEBUG'] = True\n SQLITEDB = Config.SQLITEDB\n app.config['SQLALCHEMY_DATABASE_URI'] = SQLITEDB\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n elif config_name == 'prod':\n DRIVER = Config.DRIVER\n SERVER = Config.SERVER\n DATABASE = Config.DATABASE\n UID = Config.UID\n PWD = Config.PWD\n params = urllib.parse.quote_plus('DRIVER={'+ DRIVER +'};SERVER='+ SERVER +';DATABASE=' + DATABASE +';UID=' + UID +';PWD='+ PWD +';')\n app.config['SQLALCHEMY_DATABASE_URI'] = \"mssql+pyodbc:///?odbc_connect=%s\" % params\n app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False\n app.config['ENV'] = 'production'\n app.config['TESTING'] = False\n else:\n SQLITEDB = Config.SQLITEDB\n app.config['SQLALCHEMY_DATABASE_URI'] = SQLITEDB\n\n #app.config.from_object(config[config_name])\n 
#app.config.from_object('config.DevelopmentConfig')\n config[config_name].init_app(app)\n\n # bootstrap.init_app(app)\n # mail.init_app(app)\n # moment.init_app(app)\n\n db = SQLAlchemy(app)\n #Migrate(app, db)\n\n db.init_app(app)\n\n # initialize login manager\n login_manager.init_app(app)\n # tell login manager what the HTML view is\n login_manager.login_view = 'sign_in'\n\n with app.app_context():\n # define the index page\n @app.route('/')\n def index():\n return render_template('index.html')\n\n # include the routes\n from iguanadenstudios.about_us.routes import about_us_blueprint\n from iguanadenstudios.audiotools.routes import audiotools_blueprint\n from iguanadenstudios.booking.routes import booking_blueprint\n from iguanadenstudios.tracklists.routes import tracklists_blueprint\n from iguanadenstudios.mastering.routes import mastering_blueprint\n from iguanadenstudios.register.routes import register_blueprint\n from iguanadenstudios.sign_in.routes import sign_in_blueprint\n from iguanadenstudios.upload.routes import upload_blueprint\n from iguanadenstudios.error_pages.handlers import error_pages\n\n # register blueprints\n # using url_prefix allows the changing of how the url displays. the below example produces:\n # /about_us/about_us\n # app.register_blueprint(about_us_blueprint, url_prefix = '/about_us')\n app.register_blueprint(about_us_blueprint)\n app.register_blueprint(audiotools_blueprint)\n app.register_blueprint(booking_blueprint)\n app.register_blueprint(tracklists_blueprint)\n app.register_blueprint(mastering_blueprint)\n app.register_blueprint(register_blueprint)\n app.register_blueprint(sign_in_blueprint)\n app.register_blueprint(upload_blueprint)\n app.register_blueprint(error_pages)\n\n return app" ]
[ "0.74086744", "0.71356195", "0.71240467", "0.7108809", "0.7074317", "0.7031925", "0.70253813", "0.7023434", "0.7022537", "0.7013904", "0.70036703", "0.69989914", "0.69644254", "0.6951114", "0.69442946", "0.693363", "0.6925931", "0.6923495", "0.6898156", "0.68892235", "0.68874353", "0.6879467", "0.6857361", "0.68300545", "0.6814", "0.6810603", "0.6789867", "0.67521", "0.6751431", "0.6737694" ]
0.7772478
0
Adds the commit to the commits array if it doesn't already exist, and returns the commit's index in the array.
def add_commit(self, commit): sha1 = commit.hex if sha1 in self._commits: return self._commits[sha1] title, separator, body = commit.message.partition("\n") commit = { 'explored': False, 'sha1': sha1, 'name': GitUtils.abbreviate_sha1(sha1), 'describe': GitUtils.describe(sha1), 'refs': GitUtils.refs_to(sha1, self.repo()), 'author_name': commit.author.name, 'author_mail': commit.author.email, 'author_time': commit.author.time, 'author_offset': commit.author.offset, 'committer_name': commit.committer.name, 'committer_mail': commit.committer.email, 'committer_time': commit.committer.time, 'committer_offset': commit.committer.offset, # 'message': commit.message, 'title': title, 'separator': separator, 'body': body.lstrip("\n"), } self._json['commits'].append(commit) self._commits[sha1] = len(self._json['commits']) - 1 return self._commits[sha1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_commit(self, commit_id):\n commit = next((\n commit for commit in self.repo.iter_commits()\n if commit.hexsha == commit_id\n ), None)\n if commit is None:\n raise Exception(f'Commit {commit_id} not found!')\n return commit", "def commit_id(self):\n return self._commit_id", "def get_last_commit_id(commits):\n print(commits)\n if bool(commits):\n return commits[-1].get('id')\n return \"no commits\"", "def get_commit_id():\n return about.get_commit_id()", "def commit(self, commit_message):\n self.git_repo.index.commit(commit_message)", "def commits() -> None:\n project = get_project(require=True)\n commits_data = request('get', f'/api/v0/projects/{project.id}/commits/').json()\n current_commit = None\n try:\n current_commit = get_current_commit(project.directory)\n except Exception:\n pass\n\n # Filter out ad-hoc executions (and remove the adhocness marker)\n commits_data = [commit for commit in commits_data if not commit.pop('adhoc', False)]\n\n # Mark the current commit\n for commit in commits_data:\n if commit['identifier'] == current_commit:\n commit['identifier'] += ' (current)'\n\n print_table(commits_data)", "def _insert_into_new_columns(self, commit, mapping_index):\n for i in range(self.num_new_columns):\n if self.new_columns[i].commit == commit:\n self.mapping[mapping_index] = i\n return mapping_index + 2\n\n # This commit isn't already in new_columns. Add it.\n column = Column(commit, self._find_commit_color(commit))\n self.new_columns[self.num_new_columns] = column\n self.mapping[mapping_index] = self.num_new_columns\n self.num_new_columns += 1\n return mapping_index + 2", "def commit_of_ref(self, ref):\n # Check cache\n if ref in self._refs:\n return self._refs[ref]\n\n commit = self._get_commit_from_ref(ref)\n self._refs[ref] = \"\"\n if commit:\n self._refs[ref] = str(commit.id)\n\n return self._refs[ref]", "def _get_git_commit_id():\n from git import Repo\n from os.path import split, dirname\n path = split(dirname(__file__))[0]\n commit_id = Repo(path).head.object.hexsha\n return commit_id[:8]", "def get_first_commit_contains(self, file):\n\n commits = self.get_commits_contains(file)\n return commits[-1] if commits else None", "def commit_names(self, commit):\n return []", "def commit():\n query = {\"type\": \"commit\", \"cmd\": \"<commit></commit>\"}\n\n return _get_job_results(query)", "def get_commit_count():\n if COMMIT_COUNT is None:\n return shell_output('git rev-list {base_version}..HEAD --count'\n .format(base_version=get_base_version()))\n return COMMIT_COUNT", "def commit_hash(self):\n return self._commit_hash", "def commit():\n return _run_indexer_func(\"commit\")", "def find_element_by_commit(sysmlId, commitId):\n elementList = get_elements_from_elasticsearch(sysmlId)\n for element in elementList:\n if element[\"_source\"][\"_commitId\"] == commitId:\n return element[\"_source\"]", "def update_commit_cache(commit, cache):\n\n cache.append(commit.id)", "def commit(self, sha):\r\n return repocommits.RepoCommit(self, sha)", "def cur_commit():\n result = run(\n [\"git\", \"rev-parse\", \"HEAD\"], stdout=PIPE, stderr=PIPE, encoding=\"utf-8\",\n )\n result.check_returncode()\n return result.stdout.strip()", "def get_committable_i(n, c):\n if \"committable\" not in n.df(c):\n idx = pd.Index([])\n else:\n idx = n.df(c)[lambda ds: ds[\"committable\"]].index\n return idx.rename(f\"{c}-com\")", "def commit(self):\n return settings.GIT_COMMIT", "def next_commit(self, commit):\n return RainerCommit(\n {\"version\" : int(commit.meta[\"version\"]) + 1,\n \"author\" : 
os.getlogin(),\n \"comment\" : \"\"},\n commit.value if commit.value is not None else \"\"\n )", "def get_commit_by_url(commit_url):\n commit_sql = \"SELECT * FROM github_commit WHERE url=?\"\n return dbutils.execute_query(commit_sql, (commit_url,), DATABASE_FILE)", "def seek_commit_position(self, commit_position):\n return self._most_recent_log_matching(\n '^Cr-Commit-Position: %s' % commit_position)", "def _add_commit_sha1_to_lists(self):\n sha1_num_commits = \"-\" + self.commit_number\n sha1_args = [sha1_num_commits, \"--pretty=%h\"]\n # git log -[N] --pretty=%h ===> newline delimited list of SHA1 x N commit\n sha1_string = self.git.log(sha1_args)\n # do not modify to os.linesep, Win fails tests with this change\n self.commit_sha1_list = sha1_string.split(\"\\n\")", "def commit_naught(commits):\n print(type(commits.get('commits')[0].get('id')))\n # print(commits.get('commits'))\n # print(commits.get('commits').get('id'))\n #print(commits[0].get('commits').get('id'))\n\n if bool(commits):\n return commits.get('commits')[0].get('id')\n return \"no commits\"", "def add_commit(repo, cfg, model, developer_gen, date):\n model, kwargs = model_note_change(model, developer_gen, date)\n msg = message_of(\n cfg, model.ticket if model.ticket is not None else \"\", \"general_commit_words\"\n )\n repo.index.commit(msg, **kwargs)\n return repo, model", "def get_current_commit_sha():\n return check_output(\n \"git rev-parse HEAD\".split(\" \")\n ).decode('utf-8').strip()", "def commits_behind_master(self, commit):\n return len(\n self.run(['git', 'rev-list',\n '{}..origin/master'.format(commit)]).splitlines())", "def get_git_commit_count(path):\n process = subprocess.Popen(['git', 'rev-list', 'HEAD', '--count', '--no-merges'], cwd=path, stdout=subprocess.PIPE)\n stdout, _ = process.communicate()\n number = stdout.decode().strip(\"\\n\")\n return int(number)" ]
[ "0.6170759", "0.593805", "0.5836253", "0.56688523", "0.5563604", "0.5461908", "0.5457891", "0.5429918", "0.5420427", "0.5400795", "0.5369754", "0.5346436", "0.5341644", "0.532622", "0.53205234", "0.5295877", "0.5276312", "0.52377796", "0.5224023", "0.52174157", "0.5216296", "0.5183774", "0.5183015", "0.51764715", "0.51421344", "0.50859135", "0.5066372", "0.5061995", "0.5056588", "0.5054754" ]
0.64689815
0
Uniquely abbreviates the given SHA1.
def abbreviate_sha1(cls, sha1): # For now we invoke git-rev-parse(1), but hopefully eventually # we will be able to do this via pygit2. cmd = ['git', 'rev-parse', '--short', sha1] # cls.logger.debug(" ".join(cmd)) out = subprocess.check_output(cmd).strip() # cls.logger.debug(out) return out
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def uniquely_shorten(string, length):\n\n if len(string) <= length and not (len(string) == length and\n string.startswith(SHORTENED_PREFIX)):\n return string\n\n h = hashlib.sha256()\n h.update(\"%s \" % length)\n h.update(string)\n hash_text = h.hexdigest()\n\n return SHORTENED_PREFIX + hash_text[:length-len(SHORTENED_PREFIX)]", "def SHA1(self) -> _n_0_t_3[_n_0_t_9]:", "def sha1(s: str) -> str:\n return hashlib.sha1(s.encode()).hexdigest()", "def short_hash(hash, chars=11):\n ch_ea = int((chars - 3) / 2)\n if hash is None:\n return (\"0\" * ch_ea) + \"...\" + (\"0\" * ch_ea)\n return hash[:ch_ea] + \"...\" + hash[(-1 * ch_ea):]", "def sha1(self) -> str:\n return self.data.sha1", "def dopplr(name):\n return \"#\" + hashlib.sha224(name).hexdigest()[:6]", "def shortName(self, length = 3):\n if len(self.short) == length: return self.short\n s = str(self)\n if len(s) < length:\n self.short = s + \" \"*(length-len(s))\n return self.short\n r = []; alphaNum = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n for ch in s:\n if ch in alphaNum:\n r.append(ch)\n elif ch in \", \": alphaNum = alphaNum + \"0123456789\"\n elif ch == \".\":\n del r[-1]\n alphaNum = alphaNum[:26]\n r = r[:length]\n if len(r) < length: r.extend([\" \"]*(length-len(r)))\n if self.suffix >= 1: r[-1] = str(self.suffix)[-1]\n self.short = \"\".join(r)\n return self.short", "def nice_hash(*args):\n h = sha1()\n for item in args:\n h.update(unicode(item))\n return b32encode(h.digest())", "def format_unique_id(address: str) -> str:\n return address.replace(\":\", \"\").lower()", "def sha1Function():\r\n\r\n sha1Input = input(\"Enter SHA-1 String: \") # user input for hashing\r\n \r\n sha1Result = hashlib.sha1(sha1Input.encode()) # encoding user input then sending to sha1() function\r\n \r\n print(\"Hashing Successful\")\r\n print(\"The SHA-1 Hashing Result is : \", end =\"\") \r\n print(sha1Result.hexdigest()) # printing the hashing result in hexadecimal value\r\n\r\n menu() # display the menu again\r", "def hash_cli_name(name):\n from hashlib import blake2b\n return blake2b(name.encode(), digest_size=32).hexdigest()", "def str_sha(raw_sha):\n return hexlify(raw_sha)[:12]", "def sha1(self):\n return self.tag(\"sha1\")", "def sha1(self, s):\n\t\tself.sha1_calls += 1\n\t\treturn int(hashlib.sha1(s).hexdigest(), 16)", "def _get_prefix(self):\r\n return _sha512('health'.encode('utf-8'))[0:6]", "def add_sha1(sender, form, **kwargs):\n if not const.TAG_SHA1 in form.all_properties():\n form[\"#sha1\"] = form.xml_sha1()\n form.save()\n else:\n current_sha = form.all_properties().get(const.TAG_SHA1, \"\")\n calculated_sha = form.xml_sha1()\n if current_sha != calculated_sha:\n logging.error(\"bad sha-1 calculation for form %s, was %s but expected %s... 
overriding\" % \\\n (form.get_id, current_sha, calculated_sha))\n form[\"#sha1\"] = calculated_sha\n form.save()", "def _hash_name(self, name, length=None):\n if not length:\n length = self.header_size\n hashed = name[:min(length, len(name))]\n for x in range(length, len(name), length):\n rem = min(x+length,len(name))-x\n for i in range(rem):\n hashed = hashed[:i] + chr(ord(name[x + i]) ^ ord(hashed[i])) + hashed[i+1:]\n if len(hashed) < length:\n hashed += '\\x00' * (length-len(hashed))\n return hashed", "def HexDigest(self, name, truncation_length=None):\n\n if truncation_length is None:\n truncation_length = 64\n name_bytes = name.encode('UTF-8')\n return hashlib.sha256(name_bytes).hexdigest()[:truncation_length]", "def pack_name(name, prefix_length=0):\n name = str(name)\n if len(name) > 63 - prefix_length:\n hash = base64.b64encode(hashlib.md5(name.encode()).digest()).decode(\n ).rstrip('=')\n name = name[:prefix_length] + hash + ':' + name[-(\n 63 - prefix_length - 1 - len(hash)):]\n return name", "def capitalize1(s):\n return s[:1].upper() + s[1:]", "def get_sha1(src: str) -> str:\n if not isinstance(src, str) or src == \"\":\n raise Exception(\"Invalid src str\")\n i = io.BytesIO(bytearray(src, encoding='utf-8'))\n return get_sha1_from_stream(i)", "def flatname(name, md5it=False):\n\n flat = \"\".join(i for i in name if i.isalnum()).lower()\n flat = hashlib.md5(name.encode(\"utf-8\")).hexdigest() if md5it else flat\n return flat", "def unique_label(orig_label: str) -> str:\n return orig_label[0] + \"l\" + uuid4().hex\n # TODO: check for meteors.", "def _HashFilename(filename):\n if isinstance(filename, unicode):\n filename = filename.encode(UTF8)\n else:\n filename = unicode(filename, UTF8).encode(UTF8)\n m = hashlib.sha1(filename)\n return 'TRACKER_' + m.hexdigest() + '.' + filename[-16:]", "def shortHostname(self) -> str:\n\t\treturn self.hostname[0]", "def nice_username(email):\n clean_email = re.sub(r'\\W', '', email.replace('@', '_')).lower()\n hash = b32encode(sha1(email + str(random.random())).digest()).strip('=').lower()\n return u'%s_%s' % (clean_email[:20], hash[:6])", "def sha1hex(doc):\n doc_id = doc.pop('_id',None)\n doc_rev = doc.get('_rev',None)\n doc_string = str(doc)\n\n if doc_id is not None:\n doc['_id'] = doc_id\n\n if doc_rev is not None:\n doc['_rev'] = doc_rev\n\n return hashlib.sha1(doc_string).hexdigest().upper()", "def make_unique_username(username):\n if User.query.filter_by(username = username).first() is None:\n return username\n version = 2\n new_username = None\n while True:\n new_username = username + str(version)\n if User.query.filter_by(username = new_username).first() is None:\n break\n version += 1\n return new_username", "def get_short_fingerprint(length=6):\n assert 6 <= length <= 32\n #\n return get_fingerprint(md5=True)[-length:]", "def typeahead_hash(self) -> str:" ]
[ "0.6464314", "0.6410942", "0.6409734", "0.6156034", "0.6136031", "0.60622424", "0.6015629", "0.58965665", "0.58919525", "0.58904374", "0.58792883", "0.5815525", "0.5790694", "0.5786428", "0.5748069", "0.5668363", "0.5649841", "0.5627461", "0.56117934", "0.5600008", "0.55969566", "0.55440825", "0.5519698", "0.5499281", "0.5496322", "0.54804045", "0.5454402", "0.5448057", "0.54357606", "0.5431138" ]
0.7369677
0
Returns all refs pointing to the given SHA1.
def refs_to(cls, sha1, repo): matching = [] for refname in repo.listall_references(): symref = repo.lookup_reference(refname) dref = symref.resolve() oid = dref.target commit = repo.get(oid) if commit.hex == sha1: matching.append(symref.shorthand) return matching
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_refs(self, for_push: bool) -> List[Tuple[str, str]]:\n try:\n loc = posixpath.join(self._path, \"refs\")\n res = self._connection.files_list_folder(loc, recursive=True)\n files = res.entries\n while res.has_more:\n res = self._connection.files_list_folder_continue(res.cursor)\n files.extend(res.entries)\n except dropbox.exceptions.ApiError as e:\n if not isinstance(e.error, dropbox.files.ListFolderError):\n raise\n if not for_push:\n # if we're pushing, it's okay if nothing exists beforehand,\n # but it's good to notify the user just in case\n self._trace(\"repository is empty\", Level.INFO)\n else:\n self._first_push = True\n return []\n files = [i for i in files if isinstance(i, dropbox.files.FileMetadata)]\n paths = [i.path_lower for i in files]\n if not paths:\n return []\n revs: List[str] = []\n data: List[bytes] = []\n for rev, datum in self._get_files(paths):\n revs.append(rev)\n data.append(datum)\n refs = []\n for path, rev, datum in zip(paths, revs, data):\n name = self._ref_name_from_path(path)\n sha = datum.decode(\"utf8\").strip()\n self._refs[name] = (rev, sha)\n refs.append((sha, name))\n return refs", "def refs(self):\n p = Popen(['git', 'show-ref', '--no-head'], cwd=self.path, stdout=PIPE)\n for line in p.stdout:\n commit_id, refname = line.split()\n yield (CommitId(commit_id), refname)", "def get_refs_with_prefix(prefix):\n return (\n subprocess.check_output([\"git\", \"for-each-ref\", \"--format=%(refname)\", prefix],)\n .decode()\n .splitlines()\n )", "def list_refs(self):\n print('----\\nREFs\\n----')\n self._print_dict(self.refs)", "def list_refs(self):\n pass", "def list_remote_refs(git_url):\n client, path = dulwich.client.get_transport_and_path(git_url)\n try:\n refs = client.fetch_pack(path, lambda refs: [], None, lambda data: None)\n return {k.decode(\"UTF-8\"): v.decode(\"UTF-8\") for k, v in refs.items()}\n except dulwich.errors.HangupException as e:\n raise LSRemoteException(f\"Unable to fetch remote refs from {git_url}: {e}\")", "def get_refs(*args, **kwargs):\n return get_refs_async(*args, **kwargs).get_result()", "def list_all_refs(self):\n self.list_refs()\n self.list_ref0s()\n self.list_defect_refs()", "def _add_commit_sha1_to_lists(self):\n sha1_num_commits = \"-\" + self.commit_number\n sha1_args = [sha1_num_commits, \"--pretty=%h\"]\n # git log -[N] --pretty=%h ===> newline delimited list of SHA1 x N commit\n sha1_string = self.git.log(sha1_args)\n # do not modify to os.linesep, Win fails tests with this change\n self.commit_sha1_list = sha1_string.split(\"\\n\")", "def resolve_all_refs(s):\n refs = []\n # ask all graphs for REFs\n for graph in s.graphs.values():\n refs.extend( graph.list_of_all_unpointed_refs() )\n\n # resolve collected refs\n for ref in refs:\n ref.resolve()\n\n return len( refs )", "def get_refs(genome_build, aligner, config):\n find_fn = _find_file(config[CONFIG_KEY], startswith=True)\n ref_prefix = sret.find_ref_prefix(genome_build, find_fn)\n return sret.standard_genome_refs(genome_build, aligner, ref_prefix, _list(config[CONFIG_KEY]))", "def list_project_refs(self, entity):\n\n refs = []\n\n for ref in self.cache.list_project_refs(entity.objects['project'], self.tagRefs):\n # If ref name is hierarchical then only return first level\n if '/' in ref.name:\n refs.append(ref.name.split('/')[0])\n else:\n refs.append(ref.name)\n\n # Refs may contain duplicates if the same prefix occurs multiple times\n return list(set(refs))", "def cmd_get_sha(ref):\n return ['git', 'rev-parse', ref]", "def getReferencesFrom(self, address: 
ghidra.program.model.address.Address) -> List[ghidra.program.model.symbol.Reference]:\n ...", "async def get_refs_all(self, lastUpdate):\n await asyncio.gather(\n *tuple(\n asyncio.ensure_future(self.get_refs_each(item, lastUpdate))\n for item in self.criteria\n ),\n return_exceptions=True\n )", "def show_refs(config, args):\n for item in lib.input_json_lines():\n yield config.repo.ref(item)", "def _fetch_sha1(stale_check):\n retrycount = 5\n while retrycount != 0:\n try:\n contents = urlopen(\"http://\" + stale_check).read().decode(\"utf-8\")\n return json.loads(contents)[\"sha\"]\n except URLError:\n retrycount -= 1\n\n return None", "def get_all_refobjs(self, ):\n return cmds.ls(type=\"jb_reftrack\")", "def list_ref_keys(self):\n print('=======')\n print('REFs')\n print('=======')\n for key in self.refs:\n print(key)", "def find_xrefs_multi_async(xrefs):\n # The IN operator does multiple sequential queries and ORs them\n # together. This is slow here-- a range query is faster, since\n # this is used to get xrefs for a set of contiguous builds.\n if not xrefs: # nothing => nothing\n raise ndb.Return({})\n xrefs = set(xrefs)\n issues = yield GHIssueDigest.query(\n GHIssueDigest.xref >= min(xrefs),\n GHIssueDigest.xref <= max(xrefs)).fetch_async(batch_size=500)\n refs = {}\n for issue in issues:\n for xref in issue.xref:\n if xref in xrefs:\n refs.setdefault(xref, []).append(issue)\n raise ndb.Return(refs)", "def get_crefs_from( ea ):\r\n\tret = []\r\n\txrf = get_first_cref_from( ea )\r\n\tif xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_cref_from( ea, xrf )\r\n\twhile xrf != BADADDR:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_cref_from( ea, xrf )\r\n\treturn ret", "def refs(self):\n return self._refs", "def get_refs(self): \n for row in self._get_references_node():\n yield row.fetch_all_fields()", "def CheckHashes(self, hashes, unused_external=True):\n hash_map = {}\n for hsh in hashes:\n if hsh.HasField(\"sha1\"):\n digest = hsh.sha1\n hash_urn = self.PATH.Add(str(digest))\n logging.info(\"Checking URN %s\", str(hash_urn))\n hash_map[hash_urn] = digest\n\n for metadata in aff4.FACTORY.Stat(list(hash_map), token=self.token):\n yield metadata[\"urn\"], hash_map[metadata[\"urn\"]]", "async def refs(self, user, repo):\n ref_types = (\"branches\", \"tags\")\n ref_data = [None, None]\n\n for i, ref_type in enumerate(ref_types):\n with self.catch_client_error():\n response = await getattr(self.github_client, \"get_%s\" % ref_type)(\n user, repo\n )\n ref_data[i] = json.loads(response_text(response))\n\n return ref_data", "def get_short_crefs_from( ea ):\r\n\tret = []\r\n\txrf = get_first_cref_from( ea )\r\n\txrf2 = get_first_fcref_from( ea )\r\n\tif xrf != BADADDR and xrf != xrf2:\r\n\t\tret.append( xrf )\r\n\txrf = get_next_cref_from( ea, xrf )\r\n\twhile xrf != BADADDR and xrf != xrf2:\r\n\t\tret.append( xrf )\r\n\t\txrf = get_next_cref_from( ea, xrf )\r\n\treturn ret", "def object_resolve(repo, name):\n candidates = list()\n hashRE = re.compile(r\"^[0-9A-Fa-f]{4,40}$\")\n\n # Empty string? 
Abort.\n if not name.strip():\n return None\n\n # Head is nonambiguous\n if name == \"HEAD\":\n return [ref_resolve(repo, \"HEAD\")]\n\n if hashRE.match(name):\n if len(name) == 40:\n # This is a complete hash\n return [name.lower()]\n\n # This is a small hash 4 seems to be the minimal length\n # for git to consider something a short hash.\n # This limit is documented in man git-rev-parse\n name = name.lower()\n prefix = name[0:2]\n path = os.path.join(repo.git_obj_dir, prefix)\n if path:\n rem = name[2:]\n for f in os.listdir(path):\n if f.startswith(rem):\n candidates.append(prefix + f)\n return candidates", "def get_refs_async(hostname, project, ref_prefix=None, **fetch_kwargs):\n ref_prefix = ref_prefix or 'refs/'\n assert ref_prefix.startswith('refs/')\n assert ref_prefix.endswith('/')\n _validate_args(hostname, project)\n\n path = '%s/+refs' % urllib.parse.quote(project)\n\n prepend_prefix = False\n if len(ref_prefix) > len('refs/'):\n path += ref_prefix[4:-1] # exclude \"refs\" prefix and \"/\" suffix.\n prepend_prefix = True\n res = yield gerrit.fetch_json_async(hostname, path, **fetch_kwargs)\n if res is None:\n raise ndb.Return(None)\n\n ret = {}\n for k, v in res.items():\n # if ref_prefix was specified and there is a ref matching exactly the\n # prefix, gitiles returns full ref, not ''.\n if prepend_prefix and k != ref_prefix[:-1]: # -1 to exclude \"/\" suffix\n k = ref_prefix + k\n ret[k] = v['value']\n raise ndb.Return(ret)", "def getReferencesTo(self, address: ghidra.program.model.address.Address) -> List[ghidra.program.model.symbol.Reference]:\n ...", "def _sources_hash(self, sha, paths):\r\n files = []\r\n for relative_filename, filename in self._walk_paths(paths):\r\n with open(filename, \"rb\") as fd:\r\n sha.update(Compatibility.to_bytes(relative_filename))\r\n sha.update(fd.read())\r\n files.append(filename)\r\n return files" ]
[ "0.62746805", "0.6179788", "0.6128106", "0.5900156", "0.5898155", "0.5856927", "0.57434326", "0.57430595", "0.56420195", "0.5639147", "0.55719423", "0.55558074", "0.5515448", "0.5467317", "0.5440578", "0.5412632", "0.5385987", "0.5375013", "0.5368968", "0.53635335", "0.53559995", "0.5346984", "0.53393745", "0.5319535", "0.5271539", "0.5261537", "0.52585053", "0.52537423", "0.52372754", "0.5190607" ]
0.7806024
0
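The refs_to document in the row above resolves every reference in the repository and keeps the ones whose resolved target is the given commit. The sketch below is a minimal standalone way to exercise that same pygit2 lookup; the repository path and SHA1 are hypothetical placeholders, and the function mirrors the row's logic rather than reproducing the original class method.

import pygit2

def refs_pointing_to(sha1, repo):
    # Shorthand names of all refs whose resolved target commit is sha1.
    matching = []
    for refname in repo.listall_references():
        symref = repo.lookup_reference(refname)
        direct = symref.resolve()              # follow symbolic refs such as HEAD
        obj = repo.get(direct.target)          # object the ref ultimately points at
        if obj is not None and obj.hex == sha1:
            matching.append(symref.shorthand)
    return matching

if __name__ == "__main__":
    repo = pygit2.Repository("/path/to/repo")  # placeholder path
    print(refs_pointing_to("0123456789abcdef0123456789abcdef01234567", repo))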
Find all dependencies of the given revision, recursively traversing the dependency tree if requested.
def find_dependencies(self, dependent_rev, recurse=None): if recurse is None: recurse = self.options.recurse try: dependent = self.get_commit(dependent_rev) except InvalidCommitish as e: abort(e.message()) self.todo.append(dependent) self.todo_d[dependent.hex] = True while self.todo: sha1s = [commit.hex[:8] for commit in self.todo] self.logger.debug("TODO list: %s" % " ".join(sha1s)) dependent = self.todo.pop(0) del self.todo_d[dependent.hex] self.logger.debug("Processing %s from TODO list" % dependent.hex[:8]) self.notify_listeners('new_commit', dependent) for parent in dependent.parents: self.find_dependencies_with_parent(dependent, parent) self.done.append(dependent.hex) self.done_d[dependent.hex] = True self.logger.debug("Found all dependencies for %s" % dependent.hex[:8]) # A commit won't have any dependencies if it only added new files dependencies = self.dependencies.get(dependent.hex, {}) self.notify_listeners('dependent_done', dependent, dependencies) self.notify_listeners('all_done')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_dependencies(self, revision: Dict) -> List[Dict]:\n dependency_ids = revision['auxiliary']['phabricator:depends-on']\n revisions = self.get_revisions(phids=dependency_ids)\n result = []\n for r in revisions:\n result.append(r)\n sub = self.get_dependencies(r)\n result.extend(sub)\n return result", "def get_revision_dependencies(self, revision_name: str) -> List[Set[str]]:\n revisions: Dict[str, DBRevision] = self.load_revisions()\n revision_layers: List[Set[str]] = [{revision_name}]\n\n while True:\n new_layer: Set[str] = set()\n for rev in revision_layers[-1]:\n new_layer = new_layer.union(revisions[rev].dependencies)\n\n if len(new_layer) == 0:\n break\n\n revision_layers.append(new_layer)\n revision_layers.reverse()\n return revision_layers", "def find_dependants_recurse(key, rev_tree, previous=None):\n if previous is None:\n previous = set()\n if not key in rev_tree:\n return []\n this_level_dependants = set(rev_tree[key])\n next_level_dependants = set()\n for dependant in this_level_dependants:\n if dependant in previous:\n continue\n tmp_previous = previous.copy()\n tmp_previous.add(dependant)\n next_level_dependants.update(\n find_dependants_recurse(dependant, rev_tree,\n previous=tmp_previous,\n ))\n # ensures reloading order on the final list\n # by postponing the reload of modules in this level\n # that also appear later on the tree\n dependants = (list(this_level_dependants.difference(\n next_level_dependants)) +\n list(next_level_dependants))\n return dependants", "def find_dependencies(root):\n \n symbol_table = create_symbol_table(root)\n\n names = []\n #Set the depth of the root node\n set_depth(root, 0)\n #Stack of nodes to visit\n stack = Stack(root)\n \n #List of (src, dest) of dependencies\n dependency_table = DTable(symbol_table=symbol_table)\n\n for node, children, ntype in stack:\n \n stack.check_and_push_scope()\n\n #A Name is being loaded, therefore \n if ntype == \"Name\" and is_load(children):\n \"\"\"\n \"\"\"\n dependency_table.append( (stack.scopes, node))\n \n elif ntype == \"Assign\":\n #TODO need to add assignments and then revoke them\n #for child in children:\n #print children\n pass\n\n \n elif ntype == \"Attribute\":\n #TODO: attribute chains can be arbitrarily long\n #dep_dest = \"{}.{}\".format(node.value.id, node.attr)\n #print \"{} => {}\".format(scopes_to_str(scopes), dep_dest)\n\n #TODO: Can't just do dependency_table.append( (scopes, node))\n #since the unique_id function won't match the create the dep string like \n #{node.value.id}.{node.attr}.\n #Either generalize unique_id or something else.\n \n #Don't add children\n continue\n \n set_lineno(node, children)\n #Add children to stack\n #This musn't always be performed\n for child in children[::-1]:\n set_depth(child, node.depth + 1)\n stack.append(child)\n\n print \"dependency table is \"\n print dependency_table", "def get_projectversion_deps(projectversion_id, session):\n query = \"\"\"\n WITH RECURSIVE getparents(projectversion_id, dependency_id) AS (\n SELECT projectversion_id, dependency_id\n FROM projectversiondependency\n WHERE projectversion_id = :projectversion_id\n\n UNION ALL\n\n SELECT s2.projectversion_id, s2.dependency_id\n FROM projectversiondependency s2, getparents s1\n WHERE s2.projectversion_id = s1.dependency_id\n )\n SELECT projectversion_id, dependency_id FROM getparents;\n \"\"\"\n result = session.execute(query, {\"projectversion_id\": projectversion_id})\n\n projectversion_ids = []\n\n for row in result:\n projectversion_ids.append(row[1])\n\n return 
projectversion_ids", "def get_dependencies(self, recursive=False):\n dependencies = set()\n for reference in self.references:\n if isinstance(reference.ref_cell, Cell):\n if recursive:\n dependencies.update(reference.ref_cell.get_dependencies(True))\n dependencies.add(reference.ref_cell)\n return dependencies", "def _expand_deps_revisions(self, revision_to_expand):\n # TODO(robertocn): Review variable names in this function. They are\n # potentially confusing.\n assert revision_to_expand is not None\n try:\n min_revision = revision_to_expand.previous_revision\n max_revision = revision_to_expand\n # Parses DEPS file and sets the .deps property.\n min_revision.read_deps(self.get_perf_tester_name())\n max_revision.read_deps(self.get_perf_tester_name())\n for depot_name in depot_config.DEPOT_DEPS_NAME.keys():\n if depot_name in min_revision.deps and depot_name in max_revision.deps:\n dep_revision_min = min_revision.deps[depot_name]\n dep_revision_max = max_revision.deps[depot_name]\n if (dep_revision_min and dep_revision_max and\n dep_revision_min != dep_revision_max):\n rev_list = self._get_rev_range_for_depot(depot_name,\n dep_revision_min,\n dep_revision_max,\n min_revision)\n new_revisions = self.revisions[:max_revision.list_index]\n new_revisions += rev_list\n new_revisions += self.revisions[max_revision.list_index:]\n self.revisions = new_revisions\n self._update_revision_list_indexes()\n return True\n except RuntimeError: # pragma: no cover\n warning_text = ('Could not expand dependency revisions for ' +\n revision_to_expand.revision_string)\n self.surface_result('BAD_REV')\n if warning_text not in self.warnings:\n self.warnings.append(warning_text)\n return False", "def PrintRevInfo(self):\n # Check for revision overrides.\n revision_overrides = {}\n for revision in self._options.revisions:\n if revision.find(\"@\") < 0:\n raise gclient_utils.Error(\n \"Specify the full dependency when specifying a revision number.\")\n revision_elem = revision.split(\"@\")\n # Disallow conflicting revs\n if revision_overrides.has_key(revision_elem[0]) and \\\n revision_overrides[revision_elem[0]] != revision_elem[1]:\n raise gclient_utils.Error(\n \"Conflicting revision numbers specified.\")\n revision_overrides[revision_elem[0]] = revision_elem[1]\n\n solutions = self.GetVar(\"solutions\")\n if not solutions:\n raise gclient_utils.Error(\"No solution specified\")\n\n entries = {}\n entries_deps_content = {}\n\n # Inner helper to generate base url and rev tuple (including honoring\n # |revision_overrides|)\n def GetURLAndRev(name, original_url):\n url, revision = gclient_utils.SplitUrlRevision(original_url)\n if not revision:\n if revision_overrides.has_key(name):\n return (url, revision_overrides[name])\n else:\n scm = gclient_scm.CreateSCM(solution[\"url\"], self._root_dir, name)\n return (url, scm.revinfo(self._options, [], None))\n else:\n if revision_overrides.has_key(name):\n return (url, revision_overrides[name])\n else:\n return (url, revision)\n\n # Run on the base solutions first.\n for solution in solutions:\n name = solution[\"name\"]\n if name in entries:\n raise gclient_utils.Error(\"solution %s specified more than once\" % name)\n (url, rev) = GetURLAndRev(name, solution[\"url\"])\n entries[name] = \"%s@%s\" % (url, rev)\n # TODO(aharper): SVN/SCMWrapper cleanup (non-local commandset)\n entries_deps_content[name] = gclient_scm.scm.SVN.Capture(\n [\"cat\",\n \"%s/%s@%s\" % (url,\n self._options.deps_file,\n rev)],\n os.getcwd())\n\n # Process the dependencies next (sort alphanumerically 
to ensure that\n # containing directories get populated first and for readability)\n deps = self._ParseAllDeps(entries, entries_deps_content)\n deps_to_process = deps.keys()\n deps_to_process.sort()\n\n # First pass for direct dependencies.\n for d in deps_to_process:\n if type(deps[d]) == str:\n (url, rev) = GetURLAndRev(d, deps[d])\n entries[d] = \"%s@%s\" % (url, rev)\n\n # Second pass for inherited deps (via the From keyword)\n for d in deps_to_process:\n if type(deps[d]) != str:\n deps_parent_url = entries[deps[d].module_name]\n if deps_parent_url.find(\"@\") < 0:\n raise gclient_utils.Error(\"From %s missing revisioned url\" %\n deps[d].module_name)\n content = gclient_utils.FileRead(os.path.join(self._root_dir,\n deps[d].module_name,\n self._options.deps_file))\n sub_deps = self._ParseSolutionDeps(deps[d].module_name, content, {})\n (url, rev) = GetURLAndRev(d, sub_deps[d])\n entries[d] = \"%s@%s\" % (url, rev)\n print(\";\\n\\n\".join([\"%s: %s\" % (x, entries[x])\n for x in sorted(entries.keys())]))", "def compute_dependencies(repositories, requirement, transitive=False):\n pool = Pool(repositories)\n neighbors = _neighbors_in_repositories(pool, transitive)\n dependencies = _neighbors_for_requirement(pool, neighbors, requirement)\n return dependencies", "def closure(cls, roots: Iterable[ClasspathEntry]) -> Iterator[ClasspathEntry]:\n\n visited = set()\n queue = deque(roots)\n while queue:\n ct = queue.popleft()\n if ct in visited:\n continue\n visited.add(ct)\n yield ct\n queue.extend(ct.dependencies)", "def dependencies(artifact_or_id):\n artifact = r.coerce_to_artifact(artifact_or_id)\n visited = []\n queue = [artifact]\n while queue:\n a, *queue = queue\n\n if a in visited:\n continue\n\n visited.append(a)\n queue.extend(_artifact_branches(a))\n\n visited.reverse()\n return visited", "def dep_tree(self, root):\n \n graph = {}\n for key,extract in self.extracts.items():\n graph[key] = set(extract.get('depends',[]))\n \n def _recurse(node):\n l = set([node])\n for n in graph[node]:\n l = l | _recurse(n)\n \n return l\n \n return _recurse(root)", "def build_revision_layers(self, require_all: bool = True, include_applied: bool = False) -> List[Set[str]]:\n revisions: Dict[str, DBRevision] = self.load_revisions()\n\n revision_layers: List[Set[str]] = []\n flat_revision_layers: Set[str] = set()\n if include_applied:\n revision_layers.append(self.applied_revisions)\n flat_revision_layers: Set[str] = flat_revision_layers.union(self.applied_revisions)\n for rev in self.applied_revisions:\n revisions.pop(rev, None)\n\n while True:\n new_layer: Set[str] = set()\n revision_names: List[str] = list(revisions.keys())\n for rev_name in revision_names:\n if (revisions[rev_name].dependencies.issubset(flat_revision_layers)\n and rev_name not in flat_revision_layers):\n new_layer.add(rev_name)\n revisions.pop(rev_name)\n\n # Stop building layers when either no new revisions were put into the layer,\n # or all revisions have been accounted for\n if len(new_layer) == 0:\n break\n else:\n flat_revision_layers = flat_revision_layers.union(new_layer)\n revision_layers.append(new_layer)\n\n if len(revisions.keys()) == 0:\n break\n\n if require_all and len(revisions) != 0:\n raise Exception(f\"could not resolve dependencies for the following revisions: {revisions.keys()}\")\n\n return revision_layers", "def compute_reverse_dependencies(repositories, requirement, transitive=False):\n pool = Pool(repositories)\n reverse_neighbors = _reverse_neighbors_in_repositories(pool, transitive)\n dependencies = 
_neighbors_for_requirement(pool, reverse_neighbors,\n requirement)\n return dependencies", "def svn_client_list(char_path_or_url, svn_opt_revision_t_peg_revision, svn_opt_revision_t_revision, svn_boolean_t_recurse, apr_uint32_t_dirent_fields, svn_boolean_t_fetch_locks, svn_client_list_func_t_list_func, void_baton, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def revisions(self, path, rev_limit=1000):\n path = \"/revisions/%s%s\" % (self.session.root, format_path(path))\n\n params = {\n 'rev_limit': rev_limit,\n }\n\n url, params, headers = self.request(path, params, method='GET')\n\n return self.rest_client.GET(url, headers)", "def get_dependencies(self, recursive=False):\n dependencies = set()\n for element in self.elements:\n if isinstance(element, CellReference) or isinstance(\n element, CellArray):\n if recursive:\n dependencies.update(\n element.ref_cell.get_dependencies(True))\n dependencies.add(element.ref_cell)\n return dependencies", "def svn_client_ls(apr_hash_t_dirents, char_path_or_url, svn_opt_revision_t_revision, svn_boolean_t_recurse, svn_client_ctx_t_ctx, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass", "def repository_dependencies(self, host: (str), owner: (str), repo: (str)) -> Any:\n\n return search_api(\"repository_dependencies\", host, owner, repo)", "def dependencies(spec, request=\"all\"):\n if request not in (\"none\", \"direct\", \"all\"):\n message = \"Wrong value for argument 'request' : \"\n message += \"should be one of ('none', 'direct', 'all')\"\n raise tty.error(message + \" [current value is '%s']\" % request)\n\n if request == \"none\":\n return []\n\n if request == \"direct\":\n return spec.dependencies(deptype=(\"link\", \"run\"))\n\n # FIXME : during module file creation nodes seem to be visited multiple\n # FIXME : times even if cover='nodes' is given. This work around permits\n # FIXME : to get a unique list of spec anyhow. 
Do we miss a merge\n # FIXME : step among nodes that refer to the same package?\n seen = set()\n seen_add = seen.add\n deps = sorted(\n spec.traverse(order=\"post\", cover=\"nodes\", deptype=(\"link\", \"run\"), root=False),\n reverse=True,\n )\n return [d for d in deps if not (d in seen or seen_add(d))]", "def _knownrevs(repo, nodes):\n torev = repo.changelog.nodemap.get\n for n in nodes:\n rev = torev(n)\n if rev is not None:\n yield rev", "def find_dependencies_with_parent(self, dependent, parent):\n self.logger.debug(\" Finding dependencies of %s via parent %s\" %\n (dependent.hex[:8], parent.hex[:8]))\n diff = self.repo.diff(parent, dependent,\n context_lines=self.options.context_lines)\n for patch in diff:\n path = patch.delta.old_file.path\n self.logger.debug(\" Examining hunks in %s\" % path)\n for hunk in patch.hunks:\n self.blame_hunk(dependent, parent, path, hunk)", "def dependencies(self):\n tree_to_heads = {}\n for tree in reversed(list(self.all_subtrees())):\n if len(tree):\n head = tree.head()\n assert head.span() in tree_to_heads\n tree_to_heads[tree.span()] = tree_to_heads[head.span()]\n\n for subtree in tree:\n subhead = tree_to_heads[subtree.span()]\n if subhead.span() != head.span():\n yield (head, subhead)\n else:\n tree_to_heads[tree.span()] = tree", "def _get_dirents(self, path, rev):\n\n dir_url = self._geturl(path)\n if path:\n key = str(rev) + '/' + path\n else:\n key = str(rev)\n dirents_locks = self._dirent_cache.get(key)\n if not dirents_locks:\n dirents, locks = list_directory(dir_url, _rev2optrev(rev),\n _rev2optrev(rev), 0, self.ctx)\n dirents_locks = [dirents, locks]\n self._dirent_cache[key] = dirents_locks\n return dirents_locks[0], dirents_locks[1]", "def test_scan_and_find_dependencies_maven():\n manifests = [{\n \"filename\": \"dependencies.txt\",\n \"filepath\": \"/bin/local\",\n \"content\": open(str(Path(__file__).parent / \"data/dependencies.txt\")).read()\n }]\n res = DependencyFinder().scan_and_find_dependencies(\"maven\", manifests)\n assert \"result\" in res\n resolved = res['result'][0]['details'][0]['_resolved'][0]\n assert resolved['package'] == \"io.vertx:vertx-core\"\n assert len(resolved['deps']) == 15", "def dependencies(self, ticketnum, all=False, _seen=None):\n # returns the list of all ticket dependencies, sorted by ticket number\n if _seen is None:\n seen = []\n elif ticketnum in _seen:\n return\n else:\n seen = _seen\n seen.append(ticketnum)\n data = self._get_attributes(ticketnum)\n if 'dependencies' not in data: return []\n dependencies = data['dependencies']\n if dependencies.strip() == '': return []\n dependencies = [a.strip(\" ,;+-\\nabcdefghijklmnopqrstuvwxyz\") for a in data['dependencies'].split('#')]\n dependencies = [a for a in dependencies if a]\n dependencies = [int(a) if a.isdigit() else a for a in dependencies]\n if not all:\n return dependencies\n for a in dependencies:\n if isinstance(a, int):\n self.dependencies(a, True, seen)\n else:\n seen.append(a)\n if _seen is None:\n return seen[1:]", "def __dependency_traverse(self, v, visited, results):\n if v in visited:\n return\n visited.add(v)\n # Create list of input data\n data = []\n for p in v.predecessors():\n # Traverse dependencies that have not been visited yet\n self.__dependency_traverse(p, visited, results)\n data.append(results[p])\n data = data[0] if len(data) == 1 else data\n # Flatten list of input data if there is only one element\n results[v] = self.traverse_vertex(v, data)\n # Traverse transforms that require this dependency and have not been visited\n 
for n in v.successors():\n self.__dependency_traverse(n, visited, results)", "def dependencies(self, options):\n if not 'dependencies' in self.cscript:\n return\n\n if len(inspect.getfullargspec(self.cscript['dependencies']).args) == 2:\n self_options = copy.copy(options)\n self.add_defaults(self_options)\n deps = self.call('dependencies', self_options)\n else:\n log_normal(\"Deprecated cscript dependencies() method with no options parameter\")\n deps = self.call('dependencies')\n\n # Loop over our immediate dependencies\n for d in deps:\n dep = globals.trees.get(d[0], d[1], self.target, self.name)\n\n # deps only get their options from the parent's cscript\n dep_options = d[2] if len(d) > 2 else {}\n for i in dep.dependencies(dep_options):\n yield i\n yield (dep, dep_options, self)", "def flush(self, revision=None):\n if not revision:\n print \"Flushing all cached results...\",\n\n try:\n rmtree(\".digress_%s\" % self.__class__.__name__)\n except Exception, e:\n print \"failed: %s\" % e\n else:\n print \"done.\"\n else:\n try:\n rev = self.scm.rev_parse(revision)\n except SCMError, e:\n print e\n else:\n print \"Flushing cached results for %s...\" % rev,\n\n try:\n rmtree(os.path.join(\".digress_%s\" % self.__class__.__name__, rev))\n except Exception, e:\n print \"failed: %s\" % e\n else:\n print \"done.\"", "def find_with_deps(self, package_names):" ]
[ "0.73712236", "0.6699492", "0.6546439", "0.6005795", "0.5999744", "0.55470836", "0.55263996", "0.54850143", "0.54307467", "0.5410811", "0.5379994", "0.5371398", "0.53526044", "0.5317734", "0.529328", "0.5279827", "0.5243155", "0.52289486", "0.5216687", "0.5189272", "0.5168941", "0.5063986", "0.5058812", "0.50556993", "0.5048337", "0.5041167", "0.50123084", "0.5007408", "0.5000728", "0.4988556" ]
0.67338073
1
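The find_dependencies document above is at heart a breadth-first worklist traversal: commits are popped from a TODO queue, their dependencies are discovered through parent diffs, and newly found commits are re-queued when recursion is enabled. The sketch below isolates that traversal pattern behind a plain callback; direct_deps and the toy commit ids are hypothetical stand-ins for the blame-based discovery in the original code.

from collections import deque

def walk_dependencies(start, direct_deps, recurse=True):
    # Map each visited commit id to the set of its direct dependencies.
    todo = deque([start])
    queued = {start}
    done = {}
    while todo:
        current = todo.popleft()
        deps = set(direct_deps(current))
        done[current] = deps
        if recurse:
            for dep in deps:
                if dep not in done and dep not in queued:
                    todo.append(dep)
                    queued.add(dep)
    return done

# Purely illustrative toy graph standing in for real commit dependencies.
graph = {"d4": {"c3"}, "c3": {"b2"}, "b2": {"a1"}, "a1": set()}
print(walk_dependencies("d4", lambda c: graph[c]))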
Find all dependencies of the given revision caused by the given parent commit. This will be called multiple times for merge commits which have multiple parents.
def find_dependencies_with_parent(self, dependent, parent): self.logger.debug(" Finding dependencies of %s via parent %s" % (dependent.hex[:8], parent.hex[:8])) diff = self.repo.diff(parent, dependent, context_lines=self.options.context_lines) for patch in diff: path = patch.delta.old_file.path self.logger.debug(" Examining hunks in %s" % path) for hunk in patch.hunks: self.blame_hunk(dependent, parent, path, hunk)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_dependencies(self, revision: Dict) -> List[Dict]:\n dependency_ids = revision['auxiliary']['phabricator:depends-on']\n revisions = self.get_revisions(phids=dependency_ids)\n result = []\n for r in revisions:\n result.append(r)\n sub = self.get_dependencies(r)\n result.extend(sub)\n return result", "def get_projectversion_deps(projectversion_id, session):\n query = \"\"\"\n WITH RECURSIVE getparents(projectversion_id, dependency_id) AS (\n SELECT projectversion_id, dependency_id\n FROM projectversiondependency\n WHERE projectversion_id = :projectversion_id\n\n UNION ALL\n\n SELECT s2.projectversion_id, s2.dependency_id\n FROM projectversiondependency s2, getparents s1\n WHERE s2.projectversion_id = s1.dependency_id\n )\n SELECT projectversion_id, dependency_id FROM getparents;\n \"\"\"\n result = session.execute(query, {\"projectversion_id\": projectversion_id})\n\n projectversion_ids = []\n\n for row in result:\n projectversion_ids.append(row[1])\n\n return projectversion_ids", "def find_dependencies(self, dependent_rev, recurse=None):\n if recurse is None:\n recurse = self.options.recurse\n\n try:\n dependent = self.get_commit(dependent_rev)\n except InvalidCommitish as e:\n abort(e.message())\n\n self.todo.append(dependent)\n self.todo_d[dependent.hex] = True\n\n while self.todo:\n sha1s = [commit.hex[:8] for commit in self.todo]\n self.logger.debug(\"TODO list: %s\" % \" \".join(sha1s))\n dependent = self.todo.pop(0)\n del self.todo_d[dependent.hex]\n self.logger.debug(\"Processing %s from TODO list\" %\n dependent.hex[:8])\n self.notify_listeners('new_commit', dependent)\n\n for parent in dependent.parents:\n self.find_dependencies_with_parent(dependent, parent)\n self.done.append(dependent.hex)\n self.done_d[dependent.hex] = True\n self.logger.debug(\"Found all dependencies for %s\" %\n dependent.hex[:8])\n # A commit won't have any dependencies if it only added new files\n dependencies = self.dependencies.get(dependent.hex, {})\n self.notify_listeners('dependent_done', dependent, dependencies)\n\n self.notify_listeners('all_done')", "def find_dependants_recurse(key, rev_tree, previous=None):\n if previous is None:\n previous = set()\n if not key in rev_tree:\n return []\n this_level_dependants = set(rev_tree[key])\n next_level_dependants = set()\n for dependant in this_level_dependants:\n if dependant in previous:\n continue\n tmp_previous = previous.copy()\n tmp_previous.add(dependant)\n next_level_dependants.update(\n find_dependants_recurse(dependant, rev_tree,\n previous=tmp_previous,\n ))\n # ensures reloading order on the final list\n # by postponing the reload of modules in this level\n # that also appear later on the tree\n dependants = (list(this_level_dependants.difference(\n next_level_dependants)) +\n list(next_level_dependants))\n return dependants", "def get_revision_dependencies(self, revision_name: str) -> List[Set[str]]:\n revisions: Dict[str, DBRevision] = self.load_revisions()\n revision_layers: List[Set[str]] = [{revision_name}]\n\n while True:\n new_layer: Set[str] = set()\n for rev in revision_layers[-1]:\n new_layer = new_layer.union(revisions[rev].dependencies)\n\n if len(new_layer) == 0:\n break\n\n revision_layers.append(new_layer)\n revision_layers.reverse()\n return revision_layers", "def get_dependencies(self, recursive=False):\n dependencies = set()\n for reference in self.references:\n if isinstance(reference.ref_cell, Cell):\n if recursive:\n dependencies.update(reference.ref_cell.get_dependencies(True))\n 
dependencies.add(reference.ref_cell)\n return dependencies", "def get_parent_depends(self):\n union_parent_depends = set()\n for name, parent in self.parents.items():\n union_parent_depends.update(set(parent.depends))\n return union_parent_depends", "def find_dependencies(root):\n \n symbol_table = create_symbol_table(root)\n\n names = []\n #Set the depth of the root node\n set_depth(root, 0)\n #Stack of nodes to visit\n stack = Stack(root)\n \n #List of (src, dest) of dependencies\n dependency_table = DTable(symbol_table=symbol_table)\n\n for node, children, ntype in stack:\n \n stack.check_and_push_scope()\n\n #A Name is being loaded, therefore \n if ntype == \"Name\" and is_load(children):\n \"\"\"\n \"\"\"\n dependency_table.append( (stack.scopes, node))\n \n elif ntype == \"Assign\":\n #TODO need to add assignments and then revoke them\n #for child in children:\n #print children\n pass\n\n \n elif ntype == \"Attribute\":\n #TODO: attribute chains can be arbitrarily long\n #dep_dest = \"{}.{}\".format(node.value.id, node.attr)\n #print \"{} => {}\".format(scopes_to_str(scopes), dep_dest)\n\n #TODO: Can't just do dependency_table.append( (scopes, node))\n #since the unique_id function won't match the create the dep string like \n #{node.value.id}.{node.attr}.\n #Either generalize unique_id or something else.\n \n #Don't add children\n continue\n \n set_lineno(node, children)\n #Add children to stack\n #This musn't always be performed\n for child in children[::-1]:\n set_depth(child, node.depth + 1)\n stack.append(child)\n\n print \"dependency table is \"\n print dependency_table", "def compute_dependencies(repositories, requirement, transitive=False):\n pool = Pool(repositories)\n neighbors = _neighbors_in_repositories(pool, transitive)\n dependencies = _neighbors_for_requirement(pool, neighbors, requirement)\n return dependencies", "def _resolveCommits(self):\n \n self.logger.info(\"Resolving {0} commit parents\".format(sum(map((lambda x : len([p for p in self._commits[x].parents if isinstance(self._commits[x].parents[p], str)])), self._commits))))\n for hashKey in self._commits:\n for parentKey in self._commits[hashKey].parents:\n if isinstance(self._commits[hashKey].parents[parentKey], str):\n self.logger.debug(\"Replacing parent key {0} with actual commit\".format(parentKey))\n self._commits[hashKey].parents[parentKey] = self._commits[parentKey]", "def parents(rev):\n return (\n subprocess.check_output([\"git\", \"rev-list\", \"-n\", \"1\", \"--parents\", rev])\n .decode()\n .strip()\n .split(\" \")[1:]\n )", "def dependencies(artifact_or_id):\n artifact = r.coerce_to_artifact(artifact_or_id)\n visited = []\n queue = [artifact]\n while queue:\n a, *queue = queue\n\n if a in visited:\n continue\n\n visited.append(a)\n queue.extend(_artifact_branches(a))\n\n visited.reverse()\n return visited", "def dependencies(self):\n tree_to_heads = {}\n for tree in reversed(list(self.all_subtrees())):\n if len(tree):\n head = tree.head()\n assert head.span() in tree_to_heads\n tree_to_heads[tree.span()] = tree_to_heads[head.span()]\n\n for subtree in tree:\n subhead = tree_to_heads[subtree.span()]\n if subhead.span() != head.span():\n yield (head, subhead)\n else:\n tree_to_heads[tree.span()] = tree", "def closure(cls, roots: Iterable[ClasspathEntry]) -> Iterator[ClasspathEntry]:\n\n visited = set()\n queue = deque(roots)\n while queue:\n ct = queue.popleft()\n if ct in visited:\n continue\n visited.add(ct)\n yield ct\n queue.extend(ct.dependencies)", "def get_dependencies(self, 
recursive=False):\n dependencies = set()\n for element in self.elements:\n if isinstance(element, CellReference) or isinstance(\n element, CellArray):\n if recursive:\n dependencies.update(\n element.ref_cell.get_dependencies(True))\n dependencies.add(element.ref_cell)\n return dependencies", "def compute_reverse_dependencies(repositories, requirement, transitive=False):\n pool = Pool(repositories)\n reverse_neighbors = _reverse_neighbors_in_repositories(pool, transitive)\n dependencies = _neighbors_for_requirement(pool, reverse_neighbors,\n requirement)\n return dependencies", "def getAllParents(self,childName):\n\tgraph = \"digraph G {\"\n\tquery = \"SELECT svid FROM SpecificVersion WHERE svName='%s';\"%(childName,)\n\ttup = self.fetchOne(query)\n\tdList = [childName]\n\tidList= []\n\tdict = {}\n\tdictId= {}\n\tif tup and tup[0]:\n\t svid=int(tup[0])\n\t idList.append(svid)\n\t dict[svid]=childName\n\t while 1:\n\t try:\n\t svid=idList[0]\n\t except:\n\t break\n\t query = \"\"\"SELECT svName,svid FROM SpecificVersion,PathDepend WHERE\n\t childId='%s' AND parentId=svid\"\"\"%svid\n\t tup = self.fetchAll(query)\n\t parentList = []\n\t for item in tup:\n\t name = item[0]\n\t\t id = int(item[1])\n\t\t dict[id]=name\n\t if not dList.count(name): dList.append(name)\n\t if svid==id: # something wrong, e.g. parentId=childId\n\t\t print \"ERROR: while lookup PathDepend with query\"\n\t\t print query\n\t\t print \"found parentId('%s')=childId('%s')\"%(svid,id)\n\t\t break\n\t idList.append(id)\n\t\t parentList.append(id)\n\t graph+=\"\"\"\\n\"%s\"->\"%s\";\"\"\"%(dict[svid],name)\n\t try:\n\t dictId[svid]=parentList\n\t idList.remove(svid)\n\t except:\n\t break\n\tgraph+=\"\\n}\\n\"\n\treturn dList,idList,dict,dictId,graph", "def get_dependencies(graph: Graph, node: Node):\n dependencies: Set[Node] = set()\n def traverse_nodes(nodes):\n for candidate in nodes:\n if candidate not in dependencies:\n dependencies.add(candidate)\n traverse_nodes(graph[candidate])\n traverse_nodes(graph[node])\n dependencies.discard(node)\n return dependencies", "def dependencies(self, options):\n if not 'dependencies' in self.cscript:\n return\n\n if len(inspect.getfullargspec(self.cscript['dependencies']).args) == 2:\n self_options = copy.copy(options)\n self.add_defaults(self_options)\n deps = self.call('dependencies', self_options)\n else:\n log_normal(\"Deprecated cscript dependencies() method with no options parameter\")\n deps = self.call('dependencies')\n\n # Loop over our immediate dependencies\n for d in deps:\n dep = globals.trees.get(d[0], d[1], self.target, self.name)\n\n # deps only get their options from the parent's cscript\n dep_options = d[2] if len(d) > 2 else {}\n for i in dep.dependencies(dep_options):\n yield i\n yield (dep, dep_options, self)", "def get_dependency_configurations(self):\n deps = []\n\n for variant in self.resolve_variants():\n # Note: the variants have already been resolved\n # This for loop simply needs to resolve the dependencies one\n # by one, potentially overwriding earlier ones\n name, value = next(iter(variant.items()))\n if 'requires' in value and value['requires'] is not None:\n requires = value['requires']\n for req_name, req_config in requires.items():\n deps.append((req_name, req_config['version']))\n\n return deps", "def get_rdeps(deps):\n rdeps = set()\n current = set(deps)\n while current:\n rdeps |= current\n new = set()\n for dep in current:\n new |= set(deps_cache[dep])\n current = new\n return rdeps", "def dependencies(self, dep_context):\n if 
self.strict_deps:\n return self.target.strict_dependencies(dep_context)\n else:\n return list(self.target.closure(bfs=True, **dep_context.target_closure_kwargs))", "def _get_dependencies(self, requirement_name, version):\n pkg_metadata = self._get_metadata(requirement_name)\n versions = pkg_metadata.get('versions', dict())\n version = versions.get(str(version), dict())\n return sorted(version.get('dependencies', dict()).items())", "def dependencies(self, dep_context):\n if self.strict_deps:\n return strict_dependencies(self.target, dep_context)\n else:\n return all_dependencies(self.target, dep_context)", "def check_deps(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tcfg = self.cfg\n\t\tself.log('PHASE: dependencies', level=logging.DEBUG)\n\t\tself.pause_point('\\nNow checking for dependencies between modules', print_input=False, level=3)\n\t\t# Get modules we're going to build\n\t\tto_build = [\n\t\t\tself.shutit_map[module_id] for module_id in self.shutit_map\n\t\t\tif module_id in cfg and cfg[module_id]['shutit.core.module.build']\n\t\t]\n\t\t# Add any deps we may need by extending to_build and altering cfg\n\t\tfor module in to_build:\n\t\t\tself.resolve_dependencies(to_build, module)\n\n\t\t# Dep checking\n\t\tdef err_checker(errs, triples):\n\t\t\t\"\"\"Collate error information.\n\t\t\t\"\"\"\n\t\t\tnew_triples = []\n\t\t\tfor err, triple in zip(errs, triples):\n\t\t\t\tif not err:\n\t\t\t\t\tnew_triples.append(triple)\n\t\t\t\t\tcontinue\n\t\t\t\tfound_errs.append(err)\n\t\t\treturn new_triples\n\n\t\tfound_errs = []\n\t\ttriples = []\n\t\tfor depender in to_build:\n\t\t\tfor dependee_id in depender.depends_on:\n\t\t\t\ttriples.append((depender, self.shutit_map.get(dependee_id), dependee_id))\n\n\t\ttriples = err_checker([ self.check_dependee_exists(depender, dependee, dependee_id) for depender, dependee, dependee_id in triples ], triples)\n\t\ttriples = err_checker([ self.check_dependee_build(depender, dependee, dependee_id) for depender, dependee, dependee_id in triples ], triples)\n\t\ttriples = err_checker([ check_dependee_order(depender, dependee, dependee_id) for depender, dependee, dependee_id in triples ], triples)\n\n\t\tif found_errs:\n\t\t\treturn [(err,) for err in found_errs]\n\n\t\tself.log('Modules configured to be built (in order) are: ', level=logging.DEBUG)\n\t\tfor module_id in self.module_ids():\n\t\t\tmodule = self.shutit_map[module_id]\n\t\t\tif cfg[module_id]['shutit.core.module.build']:\n\t\t\t\tself.log(module_id + ' ' + str(module.run_order), level=logging.DEBUG)\n\t\tself.log('\\n', level=logging.DEBUG)\n\n\t\treturn []", "def dependent_prs(self):\n comments = self.data['body'].replace('\\r\\n', ' ')\n for comment in self.comments():\n comments += comment['body'].replace('\\r\\n', ' ')\n\n dependent_prs = []\n dependent_keywords = ['depends on']\n for keyword in dependent_keywords:\n pattern = r'%s %s/(\\S+)/(\\S+)/pull/(\\d+)' % (keyword, GITHUB)\n LOGGER.info(\"Finding dependent PRs by '%s' in the comments\")\n dependent_prs += re.findall(pattern, comments)\n return set(dependent_prs)", "def get_dependencies(self, resource):\n\n rel_path = resource.relative_path\n deps = self.deps[rel_path] if rel_path in self.deps \\\n else self.update_deps(resource)\n return deps", "def dependencies(spec, request=\"all\"):\n if request not in (\"none\", \"direct\", \"all\"):\n message = \"Wrong value for argument 'request' : \"\n message += \"should be one of ('none', 'direct', 'all')\"\n raise tty.error(message + \" [current value is '%s']\" % 
request)\n\n if request == \"none\":\n return []\n\n if request == \"direct\":\n return spec.dependencies(deptype=(\"link\", \"run\"))\n\n # FIXME : during module file creation nodes seem to be visited multiple\n # FIXME : times even if cover='nodes' is given. This work around permits\n # FIXME : to get a unique list of spec anyhow. Do we miss a merge\n # FIXME : step among nodes that refer to the same package?\n seen = set()\n seen_add = seen.add\n deps = sorted(\n spec.traverse(order=\"post\", cover=\"nodes\", deptype=(\"link\", \"run\"), root=False),\n reverse=True,\n )\n return [d for d in deps if not (d in seen or seen_add(d))]", "def heads(self):\n self.repo.lock_read()\n try:\n all_revs = self.repo.all_revision_ids()\n parent_map = self.repo.get_parent_map(all_revs)\n all_parents = set()\n map(all_parents.update, parent_map.itervalues())\n return set([self.lookup_changeset_id_by_revid(revid)[0]\n for revid in set(all_revs) - all_parents])\n finally:\n self.repo.unlock()", "def dependents(sent,head): # head: node address\n return sorted(chain.from_iterable(sent.nodes[head]\\\n ['deps'].values()))" ]
[ "0.65092677", "0.6238572", "0.6223792", "0.5911293", "0.5791384", "0.56604487", "0.56449336", "0.5590153", "0.55886996", "0.5547283", "0.55453926", "0.5428988", "0.53039974", "0.5298665", "0.52890277", "0.5216612", "0.51291275", "0.51100653", "0.510938", "0.5103599", "0.50664777", "0.5022271", "0.50100434", "0.4990599", "0.4984182", "0.49826112", "0.4976005", "0.4952893", "0.49525368", "0.49444047" ]
0.6811225
0
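find_dependencies_with_parent above diffs a commit against one of its parents and hands every hunk to the blame step. The snippet below shows just that diff-and-hunk iteration with pygit2, printing hunk ranges instead of blaming them; the repository path and the choice of HEAD as the dependent commit are placeholders for illustration only.

import pygit2

repo = pygit2.Repository("/path/to/repo")      # placeholder path
dependent = repo.revparse_single("HEAD")       # placeholder dependent commit
for parent in dependent.parents:
    diff = repo.diff(parent, dependent, context_lines=1)
    for patch in diff:
        path = patch.delta.old_file.path
        for hunk in patch.hunks:
            print("%s: -%d,%d +%d,%d via parent %s" % (
                path, hunk.old_start, hunk.old_lines,
                hunk.new_start, hunk.new_lines, parent.hex[:8]))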
Run git blame on the parts of the hunk which exist in the older commit in the diff. The commits generated by git blame are the commits which the newer commit in the diff depends on, because without the lines from those commits, the hunk would not apply correctly.
def blame_hunk(self, dependent, parent, path, hunk): first_line_num = hunk.old_start line_range_before = "-%d,%d" % (hunk.old_start, hunk.old_lines) line_range_after = "+%d,%d" % (hunk.new_start, hunk.new_lines) self.logger.debug(" Blaming hunk %s @ %s" % (line_range_before, parent.hex[:8])) if not self.tree_lookup(path, parent): # This is probably because dependent added a new directory # which was not previously in the parent. return cmd = [ 'git', 'blame', '--porcelain', '-L', "%d,+%d" % (hunk.old_start, hunk.old_lines), parent.hex, '--', path ] blame = subprocess.check_output(cmd) dependent_sha1 = dependent.hex if dependent_sha1 not in self.dependencies: self.logger.debug(' New dependent: %s (%s)' % (dependent_sha1[:8], self.oneline(dependent))) self.dependencies[dependent_sha1] = {} self.notify_listeners('new_dependent', dependent) line_to_culprit = {} for line in blame.split('\n'): # self.logger.debug(' !' + line.rstrip()) m = re.match('^([0-9a-f]{40}) (\d+) (\d+)( \d+)?$', line) if not m: continue dependency_sha1, orig_line_num, line_num = m.group(1, 2, 3) line_num = int(line_num) dependency = self.get_commit(dependency_sha1) line_to_culprit[line_num] = dependency.hex if self.is_excluded(dependency): self.logger.debug( ' Excluding dependency %s from line %s (%s)' % (dependency_sha1[:8], line_num, self.oneline(dependency))) continue if dependency_sha1 not in self.dependencies[dependent_sha1]: if dependency_sha1 in self.todo_d: self.logger.debug( ' Dependency %s via line %s already in TODO' % (dependency_sha1[:8], line_num,)) continue if dependency_sha1 in self.done_d: self.logger.debug( ' Dependency %s via line %s already done' % (dependency_sha1[:8], line_num,)) continue self.logger.debug( ' New dependency %s via line %s (%s)' % (dependency_sha1[:8], line_num, self.oneline(dependency))) self.dependencies[dependent_sha1][dependency_sha1] = {} self.notify_listeners('new_commit', dependency) self.notify_listeners('new_dependency', dependent, dependency, path, line_num) if dependency_sha1 not in self.dependencies: if self.options.recurse: self.todo.append(dependency) self.todo_d[dependency.hex] = True self.logger.debug(' added to TODO') dep_sources = self.dependencies[dependent_sha1][dependency_sha1] if path not in dep_sources: dep_sources[path] = {} self.notify_listeners('new_path', dependent, dependency, path, line_num) if line_num in dep_sources[path]: abort("line %d already found when blaming %s:%s" % (line_num, parent.hex[:8], path)) dep_sources[path][line_num] = True self.notify_listeners('new_line', dependent, dependency, path, line_num) diff_format = ' |%8.8s %5s %s%s' hunk_header = '@@ %s %s @@' % (line_range_before, line_range_after) self.logger.debug(diff_format % ('--------', '-----', '', hunk_header)) line_num = hunk.old_start for line in hunk.lines: if "\n\\ No newline at end of file" == line.content.rstrip(): break if line.origin == '+': rev = ln = '' else: rev = line_to_culprit[line_num] ln = line_num line_num += 1 self.logger.debug(diff_format % (rev, ln, line.origin, line.content.rstrip()))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_blame(blameoutput):\n lines = blameoutput.split('\\n')\n i = 0\n commits = {}\n\n while i < len(lines):\n # Read a commit line and parse it.\n line = lines[i]\n i += 1\n if not line.strip():\n continue\n commitline = line.split()\n commithash = commitline[0]\n lineno_then = int(commitline[1])\n lineno_now = int(commitline[2])\n\n try:\n commit = commits[commithash]\n except KeyError:\n commit = Commit(commithash)\n commits[commithash] = commit\n\n # Read commit details until we find a context line.\n while i < len(lines):\n line = lines[i]\n i += 1\n if line.startswith('\\t'):\n break\n\n try:\n key, value = line.split(' ', 1)\n except ValueError:\n key = line\n value = True\n setattr(commit, key.replace('-', '_'), value)\n\n context = line[1:]\n\n yield BlameLine(commit, context, lineno_then, lineno_now, False)", "def hunks_from_last_commits(n):\n \n assert n > 0\n \n diff_output = subprocess.check_output([\"git\", \"diff\", \"HEAD~\"+str(n) , \"--diff-filter=d\", \"--unified=0\"]\n ).decode(\"utf-8\").splitlines()\n \n return _hunks_from_diff(diff_output)", "def apply_patches(ctx, patches, branch, die_on_fail=True):\n ctx.runprocess(['git', 'checkout',\n '%s/%s' % (ctx.config['remote'], branch)])\n for patch in patches:\n print('Applying to %s: %s' % (branch, patch.subject))\n res = ctx.runprocess(\n ['git', 'am', '--3way'],\n stdin_string=''.join(patch.lines),\n check_returncode=0 if die_on_fail else None,\n )\n if not die_on_fail and res.returncode:\n raise RuntimeError(res.stderr)\n sha1 = ctx.runprocess(['git', 'rev-parse', 'HEAD']).stdout.strip()\n if ctx.verbosity:\n print('Resulting hash: %s' % sha1)\n return sha1", "def parse_hunks(diff: str) -> list[Hunk]:\n diff_pattern = (\n r\"diff --git a/.* b/(.*)\\n\" # capture file name\n r\"(?:\\w+ file mode \\d+\\n)?\" # maybe 'new file mode 100644' or similar\n r\"index .*\\n\"\n r\"--- .*\\n\"\n r\"\\+\\+\\+ .*\\n\"\n )\n\n # capture line number and length from header\n hunk_header_pattern = r\"@@ -\\d+,\\d+ \\+(\\d+),(\\d+) @@.*\\n\"\n\n # ignore initial empty match\n raw_per_file_hunks = re.split(diff_pattern, diff)[1:]\n\n parsed_hunks = []\n\n for file, raw_hunks in batch(raw_per_file_hunks, 2):\n # ignore initial empty match\n hunks = re.split(hunk_header_pattern, raw_hunks, re.MULTILINE)[1:]\n for start, length, body in batch(hunks, 3):\n lines = body.split(\"\\n\")\n lines = lines if lines[-1] else lines[:-1] # trim empty line\n parsed_hunks.append(Hunk(file, int(start), int(length), lines))\n\n return parsed_hunks", "def clean_diff(diff):\n res = []\n skip = True\n for line in diff.split('\\n'):\n if line.startswith('diff --git'):\n skip = True\n if line.startswith('@@ '):\n skip = False\n if not skip:\n res.append(line)\n return '\\n'.join(res)", "def blame_incremental(self, rev: str | HEAD, file: str, **kwargs: Any) -> Iterator[\"BlameEntry\"]:\n\n data: bytes = self.git.blame(rev, \"--\", file, p=True, incremental=True, stdout_as_string=False, **kwargs)\n commits: Dict[bytes, Commit] = {}\n\n stream = (line for line in data.split(b\"\\n\") if line)\n while True:\n try:\n line = next(stream) # when exhausted, causes a StopIteration, terminating this function\n except StopIteration:\n return\n split_line = line.split()\n hexsha, orig_lineno_b, lineno_b, num_lines_b = split_line\n lineno = int(lineno_b)\n num_lines = int(num_lines_b)\n orig_lineno = int(orig_lineno_b)\n if hexsha not in commits:\n # Now read the next few lines and build up a dict of properties\n # for this commit\n props: Dict[bytes, bytes] = {}\n 
while True:\n try:\n line = next(stream)\n except StopIteration:\n return\n if line == b\"boundary\":\n # \"boundary\" indicates a root commit and occurs\n # instead of the \"previous\" tag\n continue\n\n tag, value = line.split(b\" \", 1)\n props[tag] = value\n if tag == b\"filename\":\n # \"filename\" formally terminates the entry for --incremental\n orig_filename = value\n break\n\n c = Commit(\n self,\n hex_to_bin(hexsha),\n author=Actor(\n safe_decode(props[b\"author\"]),\n safe_decode(props[b\"author-mail\"].lstrip(b\"<\").rstrip(b\">\")),\n ),\n authored_date=int(props[b\"author-time\"]),\n committer=Actor(\n safe_decode(props[b\"committer\"]),\n safe_decode(props[b\"committer-mail\"].lstrip(b\"<\").rstrip(b\">\")),\n ),\n committed_date=int(props[b\"committer-time\"]),\n )\n commits[hexsha] = c\n else:\n # Discard all lines until we find \"filename\" which is\n # guaranteed to be the last line\n while True:\n try:\n line = next(stream) # will fail if we reach the EOF unexpectedly\n except StopIteration:\n return\n tag, value = line.split(b\" \", 1)\n if tag == b\"filename\":\n orig_filename = value\n break\n\n yield BlameEntry(\n commits[hexsha],\n range(lineno, lineno + num_lines),\n safe_decode(orig_filename),\n range(orig_lineno, orig_lineno + num_lines),\n )", "def FormatDiffHunks(hunks):\n r = []\n last_header = None\n for hunk in hunks:\n this_header = hunk.header[0:2]\n if last_header != this_header:\n r.extend(hunk.header)\n last_header = this_header\n else:\n r.extend(hunk.header[2])\n r.extend(hunk.lines)\n r.append(\"\\n\")\n return \"\".join(r)", "def get_bisect_all(good_commits, bad_commit):\n # Could also be combined with --bisect-vars, that may be more efficient.\n args = [bad_commit] + [f\"^{commit}\" for commit in good_commits]\n lines = (\n subprocess.check_output([\"git\", \"rev-list\", \"--bisect-all\"] + args)\n .decode()\n .splitlines()\n )\n # first is furthest away, last is equal to bad\n commits = [line.split(\" \")[0] for line in lines]\n return commits", "def get_commits_in_branch(branch_name):\n output = subprocess.check_output(\"git log --pretty=format:'{}' {} {}\".format(git_format, branch_name, args.extra_args), shell=True)\n lines = output.decode(\"utf-8\").split(\"\\n\")\n out = []\n for line in lines:\n if len(line) <= 1: break\n [sha, author, message] = line.split(\"\t\", 2)\n out.append((sha, author, message))\n out.reverse()\n return out", "def git_sequence_editor_squash(fpath):\n # print(sys.argv)\n import utool as ut\n text = ut.read_from(fpath)\n # print('fpath = %r' % (fpath,))\n print(text)\n # Doesnt work because of fixed witdth requirement\n # search = (ut.util_regex.positive_lookbehind('[a-z]* [a-z0-9]* wip\\n') + 'pick ' +\n # ut.reponamed_field('hash', '[a-z0-9]*') + ' wip')\n # repl = ('squash ' + ut.bref_field('hash') + ' wip')\n # import re\n # new_text = re.sub(search, repl, text, flags=re.MULTILINE)\n # print(new_text)\n prev_msg = None\n prev_dt = None\n new_lines = []\n\n def get_commit_date(hashid):\n out, err, ret = ut.cmd('git show -s --format=%ci ' + hashid, verbose=False, quiet=True, pad_stdout=False)\n # from datetime import datetime\n from dateutil import parser\n # print('out = %r' % (out,))\n stamp = out.strip('\\n')\n # print('stamp = %r' % (stamp,))\n dt = parser.parse(stamp)\n # dt = datetime.strptime(stamp, '%Y-%m-%d %H:%M:%S %Z')\n # print('dt = %r' % (dt,))\n return dt\n\n for line in text.split('\\n'):\n commit_line = line.split(' ')\n if len(commit_line) < 3:\n prev_msg = None\n prev_dt = None\n new_lines += 
[line]\n continue\n action = commit_line[0]\n hashid = commit_line[1]\n msg = ' ' .join(commit_line[2:])\n try:\n dt = get_commit_date(hashid)\n except ValueError:\n prev_msg = None\n prev_dt = None\n new_lines += [line]\n continue\n orig_msg = msg\n can_squash = action == 'pick' and msg == 'wip' and prev_msg == 'wip'\n if prev_dt is not None and prev_msg == 'wip':\n tdelta = dt - prev_dt\n # Only squash closely consecutive commits\n threshold_minutes = 45\n td_min = (tdelta.total_seconds() / 60.)\n # print(tdelta)\n can_squash &= td_min < threshold_minutes\n msg = msg + ' -- tdelta=%r' % (ut.get_timedelta_str(tdelta),)\n if can_squash:\n new_line = ' ' .join(['squash', hashid, msg])\n new_lines += [new_line]\n else:\n new_lines += [line]\n prev_msg = orig_msg\n prev_dt = dt\n new_text = '\\n'.join(new_lines)\n\n def get_commit_date(hashid):\n out = ut.cmd('git show -s --format=%ci ' + hashid, verbose=False)\n print('out = %r' % (out,))\n\n # print('Dry run')\n # ut.dump_autogen_code(fpath, new_text)\n print(new_text)\n ut.write_to(fpath, new_text, n=None)", "def svn_diff_hunk_readline_modified_text(*args):\n return _diff.svn_diff_hunk_readline_modified_text(*args)", "def branch_name(self):\n return f'phab-diff-{self.diff_id}'", "def check_diffs():\n process = Popen([\"git\", \"diff\", \"HEAD^\", \"--name-only\"], stdout=PIPE)\n\n diff, stderr = process.communicate()\n\n if process.returncode !=0:\n raise Exception(\"Unable to do git diff\")\n return diff.splitlines(False)", "def blame(\n self,\n rev: Union[str, HEAD],\n file: str,\n incremental: bool = False,\n rev_opts: Optional[List[str]] = None,\n **kwargs: Any,\n ) -> List[List[Commit | List[str | bytes] | None]] | Iterator[BlameEntry] | None:\n if incremental:\n return self.blame_incremental(rev, file, **kwargs)\n rev_opts = rev_opts or []\n data: bytes = self.git.blame(rev, *rev_opts, \"--\", file, p=True, stdout_as_string=False, **kwargs)\n commits: Dict[str, Commit] = {}\n blames: List[List[Commit | List[str | bytes] | None]] = []\n\n class InfoTD(TypedDict, total=False):\n sha: str\n id: str\n filename: str\n summary: str\n author: str\n author_email: str\n author_date: int\n committer: str\n committer_email: str\n committer_date: int\n\n info: InfoTD = {}\n\n keepends = True\n for line_bytes in data.splitlines(keepends):\n try:\n line_str = line_bytes.rstrip().decode(defenc)\n except UnicodeDecodeError:\n firstpart = \"\"\n parts = []\n is_binary = True\n else:\n # As we don't have an idea when the binary data ends, as it could contain multiple newlines\n # in the process. 
So we rely on being able to decode to tell us what is is.\n # This can absolutely fail even on text files, but even if it does, we should be fine treating it\n # as binary instead\n parts = self.re_whitespace.split(line_str, 1)\n firstpart = parts[0]\n is_binary = False\n # end handle decode of line\n\n if self.re_hexsha_only.search(firstpart):\n # handles\n # 634396b2f541a9f2d58b00be1a07f0c358b999b3 1 1 7 - indicates blame-data start\n # 634396b2f541a9f2d58b00be1a07f0c358b999b3 2 2 - indicates\n # another line of blame with the same data\n digits = parts[-1].split(\" \")\n if len(digits) == 3:\n info = {\"id\": firstpart}\n blames.append([None, []])\n elif info[\"id\"] != firstpart:\n info = {\"id\": firstpart}\n blames.append([commits.get(firstpart), []])\n # END blame data initialization\n else:\n m = self.re_author_committer_start.search(firstpart)\n if m:\n # handles:\n # author Tom Preston-Werner\n # author-mail <[email protected]>\n # author-time 1192271832\n # author-tz -0700\n # committer Tom Preston-Werner\n # committer-mail <[email protected]>\n # committer-time 1192271832\n # committer-tz -0700 - IGNORED BY US\n role = m.group(0)\n if role == \"author\":\n if firstpart.endswith(\"-mail\"):\n info[\"author_email\"] = parts[-1]\n elif firstpart.endswith(\"-time\"):\n info[\"author_date\"] = int(parts[-1])\n elif role == firstpart:\n info[\"author\"] = parts[-1]\n elif role == \"committer\":\n if firstpart.endswith(\"-mail\"):\n info[\"committer_email\"] = parts[-1]\n elif firstpart.endswith(\"-time\"):\n info[\"committer_date\"] = int(parts[-1])\n elif role == firstpart:\n info[\"committer\"] = parts[-1]\n # END distinguish mail,time,name\n else:\n # handle\n # filename lib/grit.rb\n # summary add Blob\n # <and rest>\n if firstpart.startswith(\"filename\"):\n info[\"filename\"] = parts[-1]\n elif firstpart.startswith(\"summary\"):\n info[\"summary\"] = parts[-1]\n elif firstpart == \"\":\n if info:\n sha = info[\"id\"]\n c = commits.get(sha)\n if c is None:\n c = Commit(\n self,\n hex_to_bin(sha),\n author=Actor._from_string(f\"{info['author']} {info['author_email']}\"),\n authored_date=info[\"author_date\"],\n committer=Actor._from_string(f\"{info['committer']} {info['committer_email']}\"),\n committed_date=info[\"committer_date\"],\n )\n commits[sha] = c\n blames[-1][0] = c\n # END if commit objects needs initial creation\n\n if blames[-1][1] is not None:\n line: str | bytes\n if not is_binary:\n if line_str and line_str[0] == \"\\t\":\n line_str = line_str[1:]\n line = line_str\n else:\n line = line_bytes\n # NOTE: We are actually parsing lines out of binary data, which can lead to the\n # binary being split up along the newline separator. 
We will append this to the\n # blame we are currently looking at, even though it should be concatenated with\n # the last line we have seen.\n blames[-1][1].append(line)\n\n info = {\"id\": sha}\n # END if we collected commit info\n # END distinguish filename,summary,rest\n # END distinguish author|committer vs filename,summary,rest\n # END distinguish hexsha vs other information\n return blames", "def test_diff_git_line_without_a_b_and_spaces(self):\n diff = (\n b'diff --git foo bar1 foo bar1\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n'\n )\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'foo bar1',\n orig_file_details=b'612544e4343bf04967eb5ea80257f6c64d6f42c7',\n modified_filename=b'foo bar1',\n modified_file_details=b'0000000000000000000000000000000000000000',\n old_unix_mode='100644',\n deleted=True,\n data=diff)", "def svn_diff_hunk_get_leading_context(hunk):\n return _diff.svn_diff_hunk_get_leading_context(hunk)", "def approx_lineno_across_revs(filename, newfilename, revision, newrevision,\n lineno):\n # This doesn't work that well if there are a lot of line changes within the\n # hunk (demonstrated by GitHyperBlameLineMotionTest.testIntraHunkLineMotion).\n # A fuzzy heuristic that takes the text of the new line and tries to find a\n # deleted line within the hunk that mostly matches the new line could help.\n\n # Use the <revision>:<filename> syntax to diff between two blobs. This is the\n # only way to diff a file that has been renamed.\n old = '%s:%s' % (revision, filename)\n new = '%s:%s' % (newrevision, newfilename)\n hunks = cache_diff_hunks(old, new)\n\n cumulative_offset = 0\n\n # Find the hunk containing lineno (if any).\n for (oldstart, oldlength), (newstart, newlength) in hunks:\n cumulative_offset += newlength - oldlength\n\n if lineno >= oldstart + oldlength:\n # Not there yet.\n continue\n\n if lineno < oldstart:\n # Gone too far.\n break\n\n # lineno is in [oldstart, oldlength] at revision; [newstart, newlength] at\n # newrevision.\n\n # If newlength == 0, newstart will be the line before the deleted hunk.\n # Since the line must have been deleted, just return that as the nearest\n # line in the new file. Caution: newstart can be 0 in this case.\n if newlength == 0:\n return max(1, newstart)\n\n newend = newstart + newlength - 1\n\n # Move lineno based on the amount the entire hunk shifted.\n lineno = lineno + newstart - oldstart\n # Constrain the output within the range [newstart, newend].\n return min(newend, max(newstart, lineno))\n\n # Wasn't in a hunk. 
Figure out the line motion based on the difference in\n # length between the hunks seen so far.\n return lineno + cumulative_offset", "def test_with_commit_history(self):\n\t\treview_request = self.create_review_request(create_repository=True, publish=True)\n\t\tdiffset = self.create_diffset(review_request=review_request)\n\t\tself.create_diffcommit(diffset=diffset, commit_id=\"r1\", parent_id=\"r0\", diff_contents=(b\"diff --git a/ABC b/ABC\\n\" b\"index 94bdd3e..197009f 100644\\n\" b\"--- ABC\\n\" b\"+++ ABC\\n\" b\"@@ -1,1 +1,1 @@\\n\" b\"-line!\\n\" b\"+line..\\n\"))\n\t\tself.create_diffcommit(diffset=diffset, commit_id=\"r2\", parent_id=\"r1\", diff_contents=(b\"diff --git a/README b/README\\n\" b\"index 94bdd3e..197009f 100644\\n\" b\"--- README\\n\" b\"+++ README\\n\" b\"@@ -1,1 +1,1 @@\\n\" b\"-Hello, world!\\n\" b\"+Hi, world!\\n\"))\n\t\tself.create_diffcommit(diffset=diffset, commit_id=\"r4\", parent_id=\"r3\", diff_contents=(b\"diff --git a/README b/README\\n\" b\"index 197009f..87abad9 100644\\n\" b\"--- README\\n\" b\"+++ README\\n\" b\"@@ -1,1 +1,1 @@\\n\" b\"-Hi, world!\\n\" b\"+Yo, world.\\n\"))\n\t\tcumulative_diff = b\"diff --git a/ABC b/ABC\\n\" b\"index 94bdd3e..197009f 100644\\n\" b\"--- ABC\\n\" b\"+++ ABC\\n\" b\"@@ -1,1 +1,1 @@\\n\" b\"-line!\\n\" b\"+line..\\n\" b\"diff --git a/README b/README\\n\" b\"index 94bdd3e..87abad9 100644\\n\" b\"--- README\\n\" b\"+++ README\\n\" b\"@@ -1,1 +1,1 @@\\n\" b\"-Hello, world!\\n\" b\"+Yo, world.\\n\"\n\t\tdiffset.finalize_commit_series(cumulative_diff=cumulative_diff, validation_info=None, validate=False, save=True)\n\t\tresponse = self.client.get(\"/r/%d/diff/raw/\" % review_request.pk)\n\t\tself.assertEqual(response.content, cumulative_diff)", "def _set_old_sha1_for_branch_adds(ctx, prl):\n new_prl = PreReceiveTupleLists()\n # duplicate the other tuples that we don't modify here\n new_prl.del_heads = prl.del_heads\n new_prl.set_tags = prl.set_tags\n new_prl.del_tags = prl.del_tags\n branch_dict = ctx.branch_dict()\n for head in prl.set_heads:\n if head.old_sha1 == p4gf_const.NULL_COMMIT_SHA1:\n # This appears to be a new branch reference, so check if it has\n # a parent somewhere in our previously translated history, and\n # use that commit as the true parent of this new branch, so we\n # avoid doing a whole lot of extra work.\n new_head = _find_true_parent(ctx.repo, head, branch_dict, ctx.repo_dirs.GIT_WORK_TREE)\n if p4gf_object_type.ObjectType.commits_for_sha1(ctx, new_head.old_sha1):\n LOG.info('updated pre-receive-tuple %s', new_head)\n head = new_head\n new_prl.set_heads.append(head)\n return new_prl", "def svn_diff_hunk_readline_diff_text(*args):\n return _diff.svn_diff_hunk_readline_diff_text(*args)", "def svn_diff_hunk_get_modified_start(hunk):\n return _diff.svn_diff_hunk_get_modified_start(hunk)", "def test_normalize_patch_with_git_diff_new_symlink(self):\n self.assertEqual(\n self.tool.normalize_patch(\n patch=(\n b'diff --git /dev/null b/test\\n'\n b'new file mode 120000\\n'\n b'--- /dev/null\\n'\n b'+++ b/test\\n'\n b'@@ -0,0 +1,1 @@\\n'\n b'+target_file\\n'\n b'\\\\ No newline at end of file'\n ),\n filename='test',\n revision=PRE_CREATION),\n (\n b'diff --git /dev/null b/test\\n'\n b'new file mode 100000\\n'\n b'--- /dev/null\\n'\n b'+++ b/test\\n'\n b'@@ -0,0 +1,1 @@\\n'\n b'+target_file\\n'\n b'\\\\ No newline at end of file'\n ))", "def main():\n smart_commit_msg_filename = SMART_COMMIT_MSG_FILENAME\n paths = get_staged_paths()\n if not len(paths):\n raise Exception(\"did you even add anything to 
staging\")\n paths += [smart_commit_msg_filename]\n mr_edited_file = max(paths, key=lambda k: os.path.getmtime(k))\n if mr_edited_file == smart_commit_msg_filename:\n print(git_commit())\n else:\n print(\"Update the patch notes!\")", "def test_diff_git_line_without_a_b(self):\n diff = (\n b'diff --git foo foo\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n'\n )\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'foo',\n orig_file_details=b'612544e4343bf04967eb5ea80257f6c64d6f42c7',\n modified_filename=b'foo',\n modified_file_details=b'0000000000000000000000000000000000000000',\n old_unix_mode='100644',\n deleted=True,\n data=diff)", "def test_noChangeFromTrunk(self):\n runCommand([\"git\", \"checkout\", \"-b\", \"mypatch\"], cwd=self.repo.path)\n\n logs = []\n\n with self.assertRaises(SystemExit) as e:\n CheckNewsfragmentScript(logs.append).main([self.repo.path])\n\n self.assertEqual(e.exception.args, (0,))\n self.assertEqual(\n logs[-1], \"On trunk or no diffs from trunk; no need to look at this.\"\n )", "def changelog_updated(target_branch):\n\n output = subprocess.getoutput(['git diff HEAD origin/{}'.format(target_branch)])\n return 'a/changelog.md b/changelog.md' in output.lower()", "def dump_commit_diff(commit):\n\n for file in commit:\n if file[4] == \"\" or \".\" not in file[4]:\n sys.stdout.flush()\n print((\"Index: \" + file[3] + \" deleted\\r\"))\n sys.stdout.flush()\n else:\n subprocess.call([\n \"cvs\",\n \"-d\",\n file[8],\n \"rdiff\",\n \"-u\",\n \"-r\",\n PostsaiCommitViewer.calculate_previous_cvs_revision(file[4]),\n \"-r\",\n file[4],\n file[3]])", "def update_commits():\n\n conn = sqlite3.connect(rebasedb)\n c = conn.cursor()\n\n cmd = ['git', '-C', chromeos_path, 'log', '--no-merges', '--abbrev=12',\n '--reverse', '--format=%at%x01%ct%x01%h%x01%an%x01%ae%x01%s',\n rebase_baseline() + '..']\n commits = subprocess.check_output(cmd, encoding='utf-8', errors='ignore')\n\n prevdate = 0\n mprevdate = 0\n for commit in commits.splitlines(): # pylint: disable=too-many-nested-blocks\n if commit != '':\n elem = commit.split('\\001', 5)\n authored = elem[0]\n committed = elem[1]\n sha = elem[2]\n contact = elem[3]\n email = elem[4]\n\n if ('@google.com' not in email and '@chromium.org' not in email\n and '@collabora.com' not in email):\n ncontact, nemail = get_contact(chromeos_path, sha)\n if ncontact:\n contact = ncontact\n email = nemail\n\n subject = elem[5].rstrip('\\n')\n\n ps = subprocess.Popen(['git', '-C', chromeos_path, 'show', sha], stdout=subprocess.PIPE)\n spid = subprocess.check_output(['git', '-C', chromeos_path, 'patch-id'],\n stdin=ps.stdout, encoding='utf-8', errors='ignore')\n patchid = spid.split(' ', 1)[0]\n\n # Make sure date is unique and in ascending order.\n date = int(committed)\n if date == prevdate:\n date = mprevdate + 1\n else:\n prevdate = date\n date = date * 1000\n mprevdate = date\n\n # Do nothing if the sha is already in the commit table.\n c.execute(\"select sha from commits where sha='%s'\" % sha)\n found = c.fetchone()\n if found:\n continue\n\n # check for cherry pick lines. If so, record the upstream SHA associated\n # with this commit. 
Only look for commits which may be upstream or may\n # have been merged from a stable release.\n usha = ''\n if not chromium.match(subject):\n u = upstream.match(subject)\n desc = subprocess.check_output(['git', '-C', chromeos_path, 'show', '-s', sha],\n encoding='utf-8', errors='ignore')\n for d in desc.splitlines():\n m = None\n if u:\n m = cherrypick.search(d)\n else:\n m = stable.search(d)\n if not m:\n m = stable2.search(d)\n if m:\n usha = m.group(2)[:12]\n # The patch may have been picked multiple times; only record\n # the first entry.\n break\n\n # Search for embedded Change-Id string.\n # If found, add it to database.\n desc = subprocess.check_output(['git', '-C', chromeos_path, 'show', '-s', sha],\n encoding='utf-8', errors='ignore')\n for d in desc.splitlines():\n chid = changeid.match(d)\n if chid:\n chid = chid.group(1)\n break\n\n # Initially assume we'll drop everything because it is not listed when\n # running \"rebase -i\". Before doing that, check if the commit is a\n # stable release commit. If so, mark it accordingly.\n reason = 'upstream'\n c.execute(\"select sha from stable where sha is '%s'\" % sha)\n if c.fetchone():\n reason = 'stable'\n\n q = \"\"\"\n INSERT INTO commits(date, created, updated, authored, committed, contact,\n email, sha, usha, patchid, changeid, subject,\n disposition, reason)\n VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\n \"\"\"\n c.execute(q,\n (date, NOW(), NOW(), authored, committed, contact, email,\n sha, usha, patchid, chid, subject, 'drop', reason))\n filenames = subprocess.check_output(\n ['git', '-C', chromeos_path, 'show', '--name-only', '--format=', sha],\n encoding='utf-8', errors='ignore')\n for fn in filenames.splitlines():\n if fn != '':\n c.execute('INSERT INTO files(sha, filename) VALUES (?, ?)',\n (\n sha,\n fn,\n ))\n\n conn.commit()\n\n # \"git cherry -v <target>\" on branch rebase_baseline gives us a list\n # of patches to apply.\n patches = subprocess.check_output(\n ['git', '-C', chromeos_path, 'cherry', '-v', rebase_target_tag()],\n encoding='utf-8', errors='ignore')\n for patch in patches.splitlines():\n elem = patch.split(' ', 2)\n # print(\"patch: \" + patch)\n # print(\"elem[0]: '%s' elem[1]: '%s' elem[2]: '%s'\" % (elem[0], elem[1], elem[2]))\n if elem[0] == '+':\n # patch not found upstream\n sha = elem[1][:12]\n # Try to find patch in stable branch. If it is there, drop it after all.\n # If not, we may need to apply it.\n c.execute(\"select sha, origin from stable where sha is '%s'\" % sha)\n found = c.fetchone()\n if found:\n c.execute(\n \"UPDATE commits SET disposition=('drop') where sha='%s'\" %\n sha)\n c.execute(\"UPDATE commits SET reason=('%s') where sha='%s'\" %\n (found[1], sha))\n c.execute(\"UPDATE commits SET updated=('%d') where sha='%s'\" %\n (NOW(), sha))\n else:\n # We need to check if the commit is already marked as drop\n # with a reason other than \"upstream\". 
If so, don't update it.\n c.execute(\n \"select disposition, reason from commits where sha='%s'\" %\n sha)\n found = c.fetchone()\n if found and found[0] == 'drop' and found[1] == 'upstream':\n c.execute(\n \"UPDATE commits SET disposition=('pick') where sha='%s'\"\n % sha)\n c.execute(\"UPDATE commits SET reason=('') where sha='%s'\" %\n sha)\n c.execute(\n \"UPDATE commits SET updated=('%d') where sha='%s'\" %\n (NOW(), sha))\n\n conn.commit()\n conn.close()", "def test_diff_git_line_without_a_b_quotes(self):\n diff = (\n b'diff --git \"foo\" \"foo\"\\n'\n b'deleted file mode 100644\\n'\n b'index 612544e4343bf04967eb5ea80257f6c64d6f42c7..'\n b'0000000000000000000000000000000000000000\\n'\n )\n\n parsed_files = self.tool.get_parser(diff).parse()\n self.assertEqual(len(parsed_files), 1)\n\n self.assert_parsed_diff_file(\n parsed_files[0],\n orig_filename=b'foo',\n orig_file_details=b'612544e4343bf04967eb5ea80257f6c64d6f42c7',\n modified_filename=b'foo',\n modified_file_details=b'0000000000000000000000000000000000000000',\n old_unix_mode='100644',\n deleted=True,\n data=diff)", "def get_diffs(history):\n\n # First get all possible representations\n mgr = plugins_get_mgr() \n keys = mgr.search('representation')['representation']\n representations = [mgr.get_by_key('representation', k) for k in keys]\n\n for i in range(len(history)):\n if i+1 > len(history) - 1:\n continue\n\n prev = history[i]\n curr = history[i+1]\n\n #print(prev['subject'], \"==>\", curr['subject'])\n #print(curr['changes'])\n for c in curr['changes']:\n \n path = c['path']\n\n # Skip the metadata file\n if c['path'].endswith('datapackage.json'): \n continue \n\n # Find a handler for this kind of file...\n handler = None \n for r in representations: \n if r.can_process(path): \n handler = r \n break \n \n if handler is None: \n continue \n\n # print(path, \"being handled by\", handler)\n\n v1_hex = prev['commit']\n v2_hex = curr['commit']\n\n temp1 = tempfile.mkdtemp(prefix=\"dgit-diff-\") \n \n try: \n for h in [v1_hex, v2_hex]: \n filename = '{}/{}/checkout.tar'.format(temp1, h)\n try:\n os.makedirs(os.path.dirname(filename))\n except:\n pass \n extractcmd = ['git', 'archive', '-o', filename, h, path]\n output = run(extractcmd)\n if 'fatal' in output: \n raise Exception(\"File not present in commit\") \n with cd(os.path.dirname(filename)): \n cmd = ['tar', 'xvf', 'checkout.tar']\n output = run(cmd) \n if 'fatal' in output: \n print(\"Cleaning up - fatal 1\", temp1)\n shutil.rmtree(temp1)\n continue \n\n # Check to make sure that \n path1 = os.path.join(temp1, v1_hex, path) \n path2 = os.path.join(temp1, v2_hex, path) \n if not os.path.exists(path1) or not os.path.exists(path2): \n # print(\"One of the two output files is missing\") \n shutil.rmtree(temp1)\n continue \n\n #print(path1, path2) \n\n # Now call the handler\n diff = handler.get_diff(path1, path2)\n\n # print(\"Inserting diff\", diff)\n c['diff'] = diff\n\n except Exception as e: \n #traceback.print_exc() \n #print(\"Cleaning up - Exception \", temp1)\n shutil.rmtree(temp1)" ]
[ "0.6053283", "0.6032853", "0.57691056", "0.5739654", "0.5558605", "0.54753315", "0.54249126", "0.53682005", "0.5311587", "0.52980185", "0.5267507", "0.52285916", "0.5224669", "0.5176965", "0.5174912", "0.51497334", "0.51410127", "0.5134518", "0.5132892", "0.5132451", "0.5125252", "0.5119443", "0.51184195", "0.50925285", "0.5092412", "0.5088877", "0.50795114", "0.50792676", "0.5070386", "0.5066822" ]
0.7433148
0
Navigate to the tree or blob object pointed to by the given target path for the given commit. This is necessary because each git tree only contains entries for the directory it refers to, not recursively for all subdirectories.
def tree_lookup(self, target_path, commit):
    segments = target_path.split("/")
    tree_or_blob = commit.tree
    path = ''
    while segments:
        dirent = segments.pop(0)
        if isinstance(tree_or_blob, pygit2.Tree):
            if dirent in tree_or_blob:
                tree_or_blob = self.repo[tree_or_blob[dirent].oid]
                # self.logger.debug('%s in %s' % (dirent, path))
                if path:
                    path += '/'
                path += dirent
            else:
                # This is probably because we were called on a
                # commit whose parent added a new directory.
                self.logger.debug(' %s not in %s in %s' % (dirent, path, commit.hex[:8]))
                return None
        else:
            self.logger.debug(' %s not a tree in %s' % (tree_or_blob, commit.hex[:8]))
            return None
    return tree_or_blob
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def traverse_tree(tree, thisFolder, path, submission):\n\n # Get files directly underneath this folder.\n blobs = tree.blobs\n thisFolderName = tree.name\n\n # Add this folder to the path.\n path = os.path.join(path, thisFolderName)\n print(path)\n\n for blob in blobs:\n filepath = os.path.join(path, blob.name)\n add_source_file(blob.name, thisFolder, filepath, submission)\n\n # Get folders directly underneath this folder.\n folders = tree.trees\n for folder in folders:\n srcFolderObj = add_source_folder(folder.name, thisFolder)[0]\n traverse_tree(folder, srcFolderObj, path, submission)\n\n return", "def git_checkout(self, commit):\n with self.host.cd(self.project_directory, expand=True):\n self.host.run(\"git checkout '%s'\" % esc1(commit))", "def get_git_tree(target):\n\n root = is_system_root(target)\n is_file = os.path.isfile(target)\n folder = os.path.dirname(target) if is_file else target\n if os.path.exists(os.path.join(folder, \".git\")):\n return folder\n else:\n if root:\n return None\n else:\n return get_git_tree(os.path.dirname(folder))", "def checkout_ref(self, commit_id):\n pass", "async def get_tree(repository, ref):\n try:\n tree = await repository.get_tree(ref)\n return tree\n except AIOGitHubException as exception:\n raise HacsException(exception)", "def traverse(tree, path):\n for node in path:\n tree = tree[node]\n return tree", "def directory_contents(self, commit, path):\n\n tree = self._get_tree(commit, path)\n return [c[0] for c in tree]", "def view(request, repo_name, branch, path, commit_sha=None,):\n file_source = diff = \"\"\n\n if path in FILE_BLACK_LIST:\n msg = MSG_NOT_ALLOWED\n return error_view( request, msg)\n \n file_path = path #!!! FIX security\n if path[-1:] == \"/\": path = path[:-1]\n \n repo = get_repo( repo_name )\n commit, tree = get_commit_tree( repo, commit_sha )\n\n if commit.parents:\n diff = get_diff( repo, path, commit.parents[0].hexsha, commit.hexsha )\n\n try:\n tree = tree[path]\n except KeyError:\n msg = MSG_NO_FILE_IN_TREE\n return error_view( request, msg )\n\n if not tree.type is \"blob\":\n msg = MSG_NO_FILE_IN_TREE\n return error_view( request, msg )\n \n mime = tree.mime_type.split(\"/\")\n \n file_source = tree.data_stream[3].read()\n \n #import ipdb; ipdb.set_trace()\n file_meta = dict(\n GITTER_MEDIA_URL = GITTER_MEDIA_URL,\n abspath = tree.abspath,\n mime = tree.mime_type,\n size = tree.size,\n tree = tree,\n path = tree.abspath,\n mime_type = mime[0],\n type = file_type_from_mime(tree.mime_type),\n )\n context = dict(\n GITTER_MEDIA_URL = GITTER_MEDIA_URL,\n file_source = file_source,\n breadcrumbs = make_crumbs(path),\n commit = commit,\n diff = diff,\n file_meta = file_meta,\n repo_name = repo_name,\n branch_name = branch,\n path = path,\n )\n if mime[0] == \"image\":\n import base64\n context[\"img_base\"] = base64.b64encode( file_source )\n\n return mix_response( \n request, \n 'commitlog/view_file.html', \n context)", "def _commit_tree(commit):\n return {\n \"commit\": commit.hexsha,\n \"parents\": [_commit_tree(c) for c in commit.parents],\n \"tree\": commit.tree.hexsha,\n \"author\": str(commit.author),\n \"authored_date\": commit.authored_date,\n \"committer\": str(commit.committer),\n \"committed_date\": commit.committed_date,\n \"message\": commit.message\n }", "async def browse_path(svc: Pytheos, path: str) -> TreeEntry:\n tree = await _init_tree_with_sources(svc)\n\n source_id = None\n current_node = tree\n for comp in path.split('/'):\n # Handle leading, trailing, or duplicate slashes\n if comp == '':\n 
continue\n\n # Refresh our current node and bail out if it can't be found.\n current_node = current_node.get(comp)\n if current_node is None:\n raise ValueError('Could not find path')\n\n # Retrieve the contents of our new current node\n source_id, results = await _retrieve_contents(svc, source_id, current_node.object)\n for item in results:\n current_node[item.name] = TreeEntry(obj=item)\n\n return current_node", "def hint(target):\n if PathIdentifier.locobjs.has_key(target): return\n if not is_url(target) and not is_wc(target): return\n\n url = target_to_url(target)\n\n root = get_repo_root(url)\n assert root[-1] != \"/\"\n assert url[:len(root)] == root, \"url=%r, root=%r\" % (url, root)\n repo_relative_path = url[len(root):]\n\n try:\n uuid = get_svninfo(target)['Repository UUID']\n uuid_pathid = 'uuid://%s%s' % (uuid, repo_relative_path)\n except KeyError:\n uuid = None\n uuid_pathid = None\n\n locobj = PathIdentifier.locobjs.get(url) or \\\n (uuid_pathid and PathIdentifier.locobjs.get(uuid_pathid))\n if not locobj:\n locobj = PathIdentifier(repo_relative_path, uuid=uuid, url=url)\n\n PathIdentifier.repo_hints[uuid] = root # (uuid may be None)\n\n PathIdentifier.locobjs[target] = locobj\n PathIdentifier.locobjs[url] = locobj\n if uuid_pathid:\n PathIdentifier.locobjs[uuid_pathid] = locobj\n if not PathIdentifier.locobjs.has_key(repo_relative_path):\n PathIdentifier.locobjs[repo_relative_path] = locobj", "def search_in_tree_recurse(root_elem, target):\r\n\t# Guard agains an empty tree\r\n\tif root_elem is None:\r\n\t\treturn False\r\n\tif root_elem.value == target:\r\n\t\treturn True\r\n\treturn search_in_tree_recurse(root_elem.left, target) or search_in_tree_recurse(root_elem.right, target)", "def find_path(t, entry):\n if t.entry == entry:\n return [t.entry]\n else:\n branches = [find_path(branch, entry) for branch in t.branches]\n for branch in branches:\n if branch:\n return [t.entry] + branch\n return False", "def checkout_java_tree(rev, path):\n logging.info(\"Checking out %s in %s\", rev, path)\n os.makedirs(path)\n # Extract java source\n subprocess.check_call([\"bash\", '-o', 'pipefail', \"-c\",\n (\"git archive --format=tar %s | \"\n \"tar -C '%s' -xf -\") % (rev, path)],\n cwd=get_repo_dir())", "def traverse(object, path, default=None, request=None):", "def get_commit_ref(refenv, commit_hash):\n reftxn = TxnRegister().begin_reader_txn(refenv)\n try:\n cmtRefKey = commit_ref_db_key_from_raw_key(commit_hash)\n cmtSpecKey = commit_spec_db_key_from_raw_key(commit_hash)\n cmtParentKey = commit_parent_db_key_from_raw_key(commit_hash)\n\n cmtRefVal = reftxn.get(cmtRefKey, default=False)\n cmtSpecVal = reftxn.get(cmtSpecKey, default=False)\n cmtParentVal = reftxn.get(cmtParentKey, default=False)\n except lmdb.BadValsizeError:\n raise ValueError(f'No commit exists with the hash: {commit_hash}')\n finally:\n TxnRegister().abort_reader_txn(refenv)\n\n if (cmtRefVal is False) or (cmtSpecVal is False) or (cmtParentVal is False):\n raise ValueError(f'No commit exists with the hash: {commit_hash}')\n\n commitRefs = commit_ref_raw_val_from_db_val(cmtRefVal)\n commitSpecs = commit_spec_raw_val_from_db_val(cmtSpecVal)\n commitParent = commit_parent_raw_val_from_db_val(cmtParentVal)\n\n calculatedDigest = cmt_final_digest(\n parent_digest=commitParent.digest,\n spec_digest=commitSpecs.digest,\n refs_digest=commitRefs.digest)\n\n if calculatedDigest != commit_hash:\n raise IOError(\n f'Data Corruption Detected. 
On retrieval of stored references for '\n f'commit_hash: {commit_hash} validation of commit record/contents '\n f'integrity failed. Calculated digest: {calculatedDigest} != '\n f'expected: {commit_hash}. Please alert the Hangar development team to '\n f'this error if possible.')\n\n return commitRefs.db_kvs", "def checkout(commit_id: str) -> None:\n wit = WitEditor()\n wit_status = WitStatus()\n\n changes_to_be_committed = wit_status.get_changes_to_be_committed()\n changed, untracked = wit_status.compare_two_list_files(\n wit_status.original_files, wit_status.stage_files,\n wit_status.parent_wit_dir, wit.stage_dir\n )\n if changed or changes_to_be_committed:\n _logger.warning(\n 'There are changed files which have not been committed, '\n 'commit them first: %s',\n ', '.join(changed + changes_to_be_committed)\n )\n else:\n is_branch = False\n if commit_id in wit.get_all_branches()[1:]: # Without 'HEAD' line\n wit.update_activated_branch(commit_id)\n commit_id = wit.get_commit_id(f'{commit_id}=')\n is_branch = True\n commit_id_images_dir = os.path.join(wit.images_dir, commit_id)\n\n # Changing the original path content\n wit.copy_tree(\n src=commit_id_images_dir, dst=wit.parent_wit_dir,\n rel=commit_id_images_dir, ignore_files=untracked\n )\n\n # Changing the stage content\n shutil.rmtree(wit.stage_dir)\n os.mkdir(wit.stage_dir)\n wit.copy_tree(\n src=commit_id_images_dir, dst=wit.stage_dir,\n rel=commit_id_images_dir\n )\n wit.update_references_file(commit_id, is_branch)\n _logger.info(\n 'HEAD part had updated successfully to: %s, '\n 'contents had successfully changed', commit_id\n )", "def show(target, rev):\n\n assert os.path.exists(target), \"%s does not exist!\" % target\n git_tree = get_git_tree(target)\n bfr = None\n target = target.replace(git_tree, \"\", 1).lstrip(\"\\\\\" if _PLATFORM == \"windows\" else \"/\")\n\n if _PLATFORM == \"windows\":\n target = target.replace(\"\\\\\", \"/\")\n if git_tree is not None:\n bfr = gitopen([\"show\", \"%s:%s\" % (rev, target)], git_tree)\n return bfr", "def git_graph(commitData):\n source_target_commits = commitData[[\"parent_id\", \"commit_id\"]].dropna().astype(\"int64\")\n source_target_commits.columns = [\"source\", \"target\"]\n\n return nx.from_pandas_edgelist(source_target_commits, create_using=nx.OrderedDiGraph())", "def test_worktree_does_checkout(repository: Repository, path: Path) -> None:\n updatefile(path)\n branch = repository.heads.create(\"branch\")\n\n with repository.worktree(branch) as worktree:\n assert (worktree.path / path.name).is_file()", "def gitCheckoutRevision(self, path, rev):\r\n\r\n with workInDirectory(path):\r\n checkoutCmd = [\"git\", \"checkout\", rev]\r\n\r\n if self.verbose:\r\n print(\"Runing command : {}\".format(\" \".join(checkoutCmd)))\r\n SubProcessUtility.runCommand(checkoutCmd)", "def pathlookup(obj_or_path_tuple, depth=None, include_origin=True):", "def test_detached_head(tmpdir):\n repo = Repo.init(path=tmpdir)\n tmppath = pathlib.Path(tmpdir)\n\n index = repo.index\n author = Actor(\"An author\", \"[email protected]\")\n committer = Actor(\"A committer\", \"[email protected]\")\n\n # First commit\n with open(tmppath / \"test.py\", \"w\") as ignore:\n ignore.write(\"print('hello world')\")\n\n index.add([\"test.py\"])\n commit1 = index.commit(\"commit1\", author=author, committer=committer)\n\n # Second commit\n with open(tmppath / \"test.py\", \"w\") as ignore:\n ignore.write(\"print('hello world')\\nprint(1)\")\n\n index.add([\"test.py\"])\n commit2 = index.commit(\"commit2\", author=author, 
committer=committer)\n\n repo.git.checkout(commit2.hexsha)\n repo.close()\n\n config = DEFAULT_CONFIG\n config.path = tmpdir\n\n archiver = GitArchiver(config)\n assert archiver.revisions(tmpdir, 1) is not None", "def find_element_by_commit(sysmlId, commitId):\n elementList = get_elements_from_elasticsearch(sysmlId)\n for element in elementList:\n if element[\"_source\"][\"_commitId\"] == commitId:\n return element[\"_source\"]", "def path(self, target):\n return self.get_paths(target, use_edges=False, downwards=True)[0]", "def _find_config_tree(tree: pyhocon.ConfigTree, target_node, path=\"\") -> list:\n result = []\n if path:\n next_path = path + \".\"\n else:\n next_path = \"\"\n for key in tree.keys():\n if key == target_node:\n result += [(path, tree)]\n else:\n if isinstance(tree[key], pyhocon.config_tree.ConfigTree):\n value = _find_config_tree(tree[key], target_node,\n path=next_path + key)\n if value:\n result += value\n return result", "def url_for(self: Self, commit_sha: str, path: str, lnum: int | None = None) -> str:\n # Default to main branch\n url = f\"https://github.com/{self.org}/{self.repo}/blob/{commit_sha}/{path}\"\n if lnum:\n url += f\"#L{lnum}\"\n return url", "def test_github_path_purepath():\n p = github_api.GithubPath('/tensorflow/datasets/tree/master/')\n sub_p = p / 'some_folder'\n assert isinstance(sub_p, github_api.GithubPath)\n assert str(p) == '/tensorflow/datasets/tree/master'\n assert p == github_api.GithubPath.from_repo('tensorflow/datasets')", "def find(cls, target):\r\n target_path = os.path.relpath(target.address.buildfile.parent_path, get_buildroot())\r\n\r\n def _find():\r\n for root_dir, types in cls._TYPES_BY_ROOT.items():\r\n if target_path.startswith(root_dir): # The only candidate root for this target.\r\n # Validate the target type, if restrictions were specified.\r\n if types and not isinstance(target, tuple(types)):\r\n # TODO: Find a way to use the BUILD file aliases in the error message, instead\r\n # of target.__class__.__name__. E.g., java_tests instead of JavaTests.\r\n raise TargetDefinitionException(target,\r\n 'Target type %s not allowed under %s' % (target.__class__.__name__, root_dir))\r\n return root_dir\r\n return None\r\n\r\n # Try already registered roots\r\n root = _find()\r\n if root:\r\n return root\r\n\r\n # Fall back to searching the ancestor path for a root.\r\n # TODO(John Sirois): We currently allow for organic growth of maven multi-module layout style\r\n # projects (for example) and do not require a global up-front registration of all source roots\r\n # and instead do lazy resolution here. This allows for parse cycles that lead to surprising\r\n # runtime errors. Re-consider allowing lazy source roots at all.\r\n for buildfile in reversed(target.address.buildfile.ancestors()):\r\n if buildfile not in cls._SEARCHED:\r\n ParseContext(buildfile).parse()\r\n cls._SEARCHED.add(buildfile)\r\n root = _find()\r\n if root:\r\n return root\r\n\r\n # Finally, resolve files relative to the BUILD file parent dir as the target base\r\n return target_path", "def get_current_path(self, cvs_path, lod):\n\n node = self.get_current_lod_directory(lod)\n\n for sub_path in cvs_path.get_ancestry()[1:]:\n node = node[sub_path]\n\n return node" ]
[ "0.5894933", "0.55579513", "0.5489392", "0.52907765", "0.528364", "0.5157724", "0.5122504", "0.51111794", "0.50728655", "0.4995954", "0.49644312", "0.486692", "0.4840334", "0.48367405", "0.48296806", "0.4813238", "0.47609875", "0.47581476", "0.472949", "0.4682747", "0.46711427", "0.46559998", "0.464159", "0.46230114", "0.4622479", "0.4618971", "0.46102506", "0.46036476", "0.45939866", "0.45883664" ]
0.7929258
0
Return the matrix square root of a hermitian or symmetric matrix. Uses scipy.linalg.eigh() to diagonalize the input efficiently.
def sqrtmh(A, ret_evd=False, evd=None):
    if not evd is None:
        (ev, EV) = evd
    else:
        ev, EV = la.eigh(A) #uses LAPACK ***EVR
    ev = sp.sqrt(ev) #we don't require positive (semi) definiteness, so we need the scipy sqrt here
    #Carry out multiplication with the diagonal matrix of eigenvalue square roots with H(EV)
    B = mmul_diag(ev, H(EV))
    if ret_evd:
        return mmul(EV, B), (ev, EV)
    else:
        return mmul(EV, B)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trans_hellinger(m):\n m = asmatrix(m)\n row_sums = sum(m, axis=1)\n result = sqrt(m / row_sums)\n return result", "def _symmetric_matrix_square_root(mat, eps=1e-10):\n # Unlike numpy, tensorflow's return order is (s, u, v)\n s, u, v = linalg_ops.svd(mat)\n # sqrt is unstable around 0, just use 0 in such case\n si = array_ops.where(math_ops.less(s, eps), s, math_ops.sqrt(s))\n # Note that the v returned by Tensorflow is v = V\n # (when referencing the equation A = U S V^T)\n # This is unlike Numpy which returns v = V^T\n return math_ops.matmul(\n math_ops.matmul(u, array_ops.diag(si)), v, transpose_b=True)", "def l2_square_from_inner_product(matrix):\n return np.diag(matrix)", "def diagonalize_asymm(H):\n E,C = np.linalg.eig(H)\n #if np.allclose(E.imag, 0*E.imag):\n # E = np.real(E)\n #else:\n # print \"WARNING: Eigenvalues are complex, will be returned as such.\"\n\n idx = E.real.argsort()\n E = E[idx]\n C = C[:,idx]\n\n return E,C", "def test_sym_sqrtm(self): \n # create random symmetric n x n matrix\n n = 5\n A = 5.0 * 2.0*(torch.rand(n,n) - 0.5)\n A = A + A.T\n\n # reference implementation of scipy\n sqA_scipy = sla.sqrtm(A.numpy())\n isqA_scipy = sla.inv(sla.sqrtm(A.numpy()))\n # my own implementation using pure torch functions\n sqA,isqA = (x.numpy() for x in _sym_sqrtm(A))\n \n self.assertTrue(np.isclose(sqA, sqA_scipy).all())\n self.assertTrue(np.isclose(isqA, isqA_scipy).all())", "def calculate_square_form(diagonal_matrix, total_sorts):\n n = len(diagonal_matrix)\n\n matrix = np.ndarray(shape=(n,n))\n\n for i in range(n):\n for j in range(len(diagonal_matrix[i])):\n # Also calculate the dissimilarity matrix\n matrix[i][j] = 100 - 100 * diagonal_matrix[i][j] / total_sorts\n matrix[j][i] = 100 - 100 * diagonal_matrix[i][j] / total_sorts\n if i == j:\n matrix[i][j] = 0\n\n return matrix\n\n # matrix = np.tril(diagonal_matrix, k=-1)\n # matrix = matrix + matrix.T\n # matrix = matrix * (-100 / total_sorts) + 100\n # np.fill_diagonal(matrix, 0)\n # return matrix", "def trans_chisq(m):\n m = asmatrix(m)\n grand_sum, row_sums, col_sums = m.sum(), m.sum(1), m.sum(0)\n result = m * sqrt(grand_sum)\n result /= row_sums\n result /= sqrt(col_sums)\n return result", "def hessian_matrix_eigvals(H_elems):\n return _symmetric_compute_eigenvalues(H_elems)", "def calculate_eigenvalues(H):\n eigenvalues, eigenvectors = np.linalg.eigh(H)\n return eigenvalues, eigenvectors", "def smith_nf(matrix):\n\n A=np.copy(matrix)\n if (np.around(A) != A).any():\n raise Exception('This function requires integer input.')\n\n # This looks much like an SVD algorithm that first bidiagonalizes\n # A by Givens rotations and then chases zeros, except for\n # the construction of the 2 by 2 elementary transformation.\n\n m, n = A.shape\n\n S = A\n U = np.eye(m)\n V = np.eye(n)\n\n # Bidiagonalize S with elementary Hermite transforms.\n for j in range(min(m, n)):\n # Zero column j below the diagonal.\n for i in range(j+1, m):\n if S[i, j]:\n # Construct an elementary Hermite transformation E\n # to zero S(i,j) by combining rows i and j.\n E = ehermite(S[j, j], S[i, j])\n # Apply the transform to S and U.\n S[[j, i], :] = np.dot(E, S[[j, i], :])\n # U[:, [j, i]] = U[:, [j, i]] / E\n U[:, [j, i]] = left_matrix_division(U[:, [j, i]], E) # solving the left matrix division\n\n # % Zero row j after the superdiagonal.\n for i in range(j+2, n):\n if S[j, i]:\n # Construct an elementary Hermite transformation E\n # to zero S(j,i) by combining columns j+1 and i.\n E = ehermite(S[j, j+1], S[j, i])\n # Apply the transform to S 
and V.\n S[:, [j+1, i]] = np.dot(S[:, [j+1, i]], E.T)\n # V[:, [j+1, i]] = V[:, [j+1, i]] / E\n V[:, [j+1, i]] = left_matrix_division(V[:, [j+1, i]], E) # solving the left matrix division\n\n # Now S is upper bidiagonal.\n # Chase the superdiagonal nonzeros away.\n\n D = np.diag(S, 1)\n while any(D):\n b = min(np.where(D))[0]\n # Start chasing bulge at first nonzero superdiagonal element.\n # To guarantee reduction in S(b,b), first make S(b,b) positive\n # and make S(b,b+1) nonnegative and less than S(b,b).\n if S[b, b] < 0:\n S[b, :] = -S[b, :]\n U[:, b] = -U[:, b]\n\n q = np.floor(S[b, b+1] / S[b, b])\n E = np.array([[1, 0], [-q, 1]])\n S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)\n # V[:, [b, b+1]] = V[:, [b, b+1]] / E\n V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E) # solving the left matrix division\n\n if S[b, b+1]:\n # Zero the first nonzero superdiagonal element\n # using columns b and b+1, to start the bulge at S(b+1,b).\n E = ehermite(S[b, b], S[b, b+1])\n S[:, [b, b+1]] = np.dot(S[:, [b, b+1]], E.T)\n # V[:, [b, b+1]] = V[:, [b, b+1]] / E\n V[:, [b, b+1]] = left_matrix_division(V[:, [b, b+1]], E)\n\n for j in range(min(m, n)):\n if j+1 < m:\n # Zero S(j+1,j) using rows j and j+1.\n E = ehermite(S[j, j], S[j+1, j])\n S[[j, j+1], :] = np.dot(E, S[[j, j+1], :])\n # U[:, [j, j+1]] = U[:, [j, j+1]] / E\n U[:, [j, j+1]] = left_matrix_division(U[:, [j, j+1]], E)\n if j+2 < n:\n # Zero S(j,j+2) using columns j+1 and j+2.\n E = ehermite(S[j, j+1], S[j, j+2])\n S[:, [j+1, j+2]] = np.dot(S[:, [j+1, j+2]], E.T)\n # V[:, [j+1, j+2]] = V[:, [j+1, j+2]] / E\n V[:, [j+1, j+2]] = left_matrix_division(V[:, [j+1, j+2]], E)\n D = np.diag(S, 1)\n\n # Now S is diagonal. Make it nonnegative.\n\n for j in range(min(m, n)):\n if S[j, j] < 0:\n S[j, :] = -S[j, :]\n U[:, j] = -U[:, j]\n\n # Squeeze factors to lower right to enforce divisibility condition.\n\n for i in range(min(m, n)):\n for j in range(i+1, min(m, n)):\n # Replace S(i,i), S(j,j) by their gcd and lcm respectively.\n a = S[i, i]\n b = S[j, j]\n [c, d, g] = extgcd(a, b)\n E = np.array([[1, d], [-b/g, a*c/g]])\n F = np.array([[c, 1], [-b*d/g, a/g]])\n S[np.ix_([i, j], [i, j])] = np.dot(np.dot(E, S[:, [i, j]][[i, j], :]), F.T)\n # S[i, i] = tmp_arr[0, 0]\n # S[i, j] = tmp_arr[0, 1]\n # S[j, i] = tmp_arr[1, 0]\n # S[j, j] = tmp_arr[1, 1]\n U[:, [i, j]] = left_matrix_division(U[:, [i, j]], E)\n V[:, [i, j]] = left_matrix_division(V[:, [i, j]], F)\n\n U = np.around(U)\n V = np.around(V)\n return U, S, V", "def get_whitener( A, k ):\n\n U, D, _ = svdk(A, k)\n Ds = sqrt(D)\n Di = 1./Ds\n return U.dot(diag(Di)), U.dot(diag(Ds))", "def hessian_sqrt(self, params):\n\n if type(params) is not MixedLMParams:\n params = MixedLMParams.from_packed(params, self.k_fe,\n self.use_sqrt)\n\n score0 = self.score_full(params)\n hess0 = self.hessian_full(params)\n\n params_vec = params.get_packed(use_sqrt=True)\n\n lin, quad = self._reparam()\n k_tot = self.k_fe + self.k_re2\n\n # Convert Hessian to new coordinates\n hess = 0.\n for i in range(k_tot):\n hess += 2 * score0[i] * quad[i]\n for i in range(k_tot):\n vi = lin[i] + 2*np.dot(quad[i], params_vec)\n for j in range(k_tot):\n vj = lin[j] + 2*np.dot(quad[j], params_vec)\n hess += hess0[i, j] * np.outer(vi, vj)\n\n return hess", "def whiten(X):\n X = asmatrix(X - mean(asmatrix(X),axis=1))\n C = X * X.T / X.shape[1]\n d, V = eigh(C)\n d[d<0] = 0 # In case d returns very small negative eigenvalues\n return (V / sqrt(d+spacing(1))) * V.T * X", "def diagonalize(operator):\n eig_values, eig_vecs = 
la.eigh(operator)\n # eig_values -= np.amin(eig_values)\n return eig_values, eig_vecs", "def get_sigmaw(self):\n\n try:\n out = np.diag(self.eigen_y)\n except AttributeError:\n self.get_eigen(predictor=False)\n out = np.diag(self.eigen_y)\n return out", "def sqrt_hessian(self, module, g_inp, g_out):\n return self._sqrt_hessian(module, g_inp, g_out)", "def hermitian(matrix):\n return sp.allclose(matrix, sp.conj(matrix.T))", "def run_numpy(self):\n return np.linalg.eigh(self.mat)", "def H(self) -> BaseMatrix:", "def H(self) -> BaseMatrix:", "def Hamiltonian(self):\n Vmat = sparse.spdiags([self.U], [0], len(self.U), len(self.U))\n Kmat = -self.KE * Schrodinger.D2mat(numpts=len(self.x), delta=self.x[1] - self.x[0], periodic=self.periodic,\n q=self.q)\n return Kmat + Vmat", "def _get_hessian(self):\n if not self.sparse:\n hess = numpy.dot(self.jacobian_T, self.jacobian)\n else:\n hess = self.jacobian_T*self.jacobian\n return hess", "def normalize(self, matrix):\n eigvals, eigvecs = np.linalg.eig(matrix)\n Sdiag = np.diagonal(np.linalg.inv(eigvecs)@matrix@eigvecs)\n S12diag = Sdiag**-.5\n S12 = np.zeros((len(S12diag), len(S12diag)))\n np.fill_diagonal(S12, S12diag)\n return S12", "def symmetrize(W):\n if W.shape[0] < W.shape[1]:\n raise ValueError('Input must be a rectangular matrix (more rows than columns).')\n\n Wsym = np.abs(W)/2 + W/2 # zero out negative entries\n Wsub = Wsym[:Wsym.shape[1],:] # extract topmost square\n Wsub = Wsub/2 + Wsub.T/2 # average off-diagonal pairs\n np.fill_diagonal(Wsub,0) # zero out diagonals\n Wsym[:Wsym.shape[1],:] = Wsub\n return Wsym", "def get_sigmazinv(self):\n\n try:\n out = np.diag(1 / self.eigen_x)\n except AttributeError:\n self.get_eigen(predictor=True)\n out = np.diag(1 / self.eigen_x)\n return out", "def diagonal_hessian(kernel: Kern, x: ndarray) -> ndarray:\n if isinstance(kernel, Stationary):\n num_points, num_dimensions = x.shape\n\n return np.zeros((num_points, num_dimensions, num_dimensions))\n else:\n raise NotImplementedError", "def errSinvh(self):\n return np.matrix(np.diag(self.errSinvhD))", "def Hamiltonian(self):\n U = self.U.flatten()\n Vmat = sparse.spdiags([U], [0], len(U), len(U))\n Kmat = sparse.kron(-self.KEy * Schrodinger.D2mat(len(self.y), self.y[1] - self.y[0], self.periodic_y, self.qy),\n sparse.identity(len(self.x))) + \\\n sparse.kron(sparse.identity(len(self.y)),\n -self.KEx * Schrodinger.D2mat(len(self.x), self.x[1] - self.x[0], self.periodic_x, self.qx))\n return Kmat + Vmat", "def get_whitener( A, k ):\n\n assert( mrank( A ) == k )\n # Verify PSD\n e = eigvals( A )[:k].real\n if not (e >= 0).all():\n print \"Warning: Not PSD\"\n print e\n\n # If A is PSD\n U, _, _ = svdk( A, k )\n A2 = cholesky( U.T.dot( A ).dot( U ) )\n W, Wt = U.dot( pinv( A2 ) ), U.dot( A2 )\n \n return W, Wt", "def sqrt(self: Float[LinearOperator, \"*batch M N\"]) -> Float[LinearOperator, \"*batch M N\"]:\n return ConstantDiagLinearOperator(self.diag_values.sqrt(), diag_shape=self.diag_shape)" ]
[ "0.62265146", "0.6111763", "0.60754913", "0.6025185", "0.5924092", "0.57053465", "0.56531864", "0.56375873", "0.56003076", "0.5589603", "0.5574254", "0.55350095", "0.5526857", "0.55079013", "0.5506939", "0.5501033", "0.54983324", "0.5481793", "0.5479065", "0.5479065", "0.5477925", "0.5444245", "0.54311097", "0.54221225", "0.54189044", "0.5410792", "0.5367758", "0.53580755", "0.53538245", "0.5335998" ]
0.66119474
0
Create a category for that party.
def create_category(party_id: PartyID, title: str) -> TourneyCategory:
    party = DbParty.query.get(party_id)
    if party is None:
        raise ValueError('Unknown party ID "{}"'.format(party_id))
    category = TourneyCategory(party.id, title)
    party.tourney_categories.append(category)
    db.session.commit()
    return category
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_category():\n category = Category(name='testcategory', description=\"\", fee=DEFAULT_FEE)\n category.save()\n return category", "def create_new_wallet_category(self):\n wallet_category_vals = self._build_new_wallet_category_vals()\n return self.env['wallet.category'].create(wallet_category_vals)", "def create(data):\n \n # create category\n return Category(\n category_id = data['id'],\n name = data['name'])", "def sample_category(name='place'):\n return Category.objects.create(name=name)", "def create_category(name):\n return Category.objects.create(name=name)", "def create_category(self, name, color, parent=None):\n response = self.client.create_category(name, color, parent=parent)[\"category\"]\n self.category_list.append(Category(response[\"name\"], response[\"id\"], response[\"color\"], response[\"text_color\"]))", "def create_category(category_name, days):\n time = timezone.now() + datetime.timedelta(days=days)\n return Category.objects.create(category_name=category_name, pub_date=time)", "def createCategory(name, user_id):\n c = Category(name=name, user_id=user_id)\n session.add(c)\n session.commit()\n print 'Category \"' + name + '\" created.'\n return c", "def createCategory(name, user_id):\n c = Category(name=name, user_id=user_id)\n db_session.add(c)\n db_session.commit()\n return c", "def cc_category(save=True, **kwargs):\n responses = kwargs.pop('responses', [])\n save = save or responses # Adding responses forces save.\n defaults = {'title': str(datetime.now()),\n 'weight': random.choice(range(50)),\n 'locale': settings.LANGUAGE_CODE}\n defaults.update(kwargs)\n\n category = models.CannedCategory(**defaults)\n if save:\n category.save()\n # Add responses to this category.\n for response, weight in responses:\n models.CategoryMembership.objects.create(\n category=category, response=response, weight=weight)\n\n return category", "def add_category(self):\n name = self.caregoryName.text()\n if name == '':\n return\n parent = self.categoryParent.currentText()\n\n addition = self.orm.add_category(name, parent)\n if not addition:\n show_warning(\"Category already exists.\")\n else:\n self.show_categories()\n if parent == '':\n self.show_available_parents()", "def create_category(self, category):\n\n super().new_entry()\n\n return Categories.objects.create(\n name=category['id'].split(':')[1],\n name_fr=category['name'],\n url=category['url']\n )", "def create_category(self, name):\n logger.info('CategoryOfProduct category create initiated')\n newname = name\n try:\n with Transaction().start(DBNAME, 1) as transaction:\n categories = self.Category.search([('name', '=', newname), ('parent', '=', 'Ingredients')])\n parent = self.Category.search(['name', '=', 'Ingredients'])\n if categories:\n return False\n category = self.Category()\n if parent:\n category.parent = parent[-1]\n category.name = newname\n category.save()\n transaction.cursor.commit()\n return True\n except Exception:\n if settings.level == 10:\n logger.exception('raised exception')\n return False", "def create_category(self): # , conf_dir, title):\n category_file_path = self.event_dir / 'category.json'\n category_data = {\n 'title': self.title,\n }\n category_data_text = json.dumps(category_data, **\n JSON_FORMAT_KWARGS) + '\\n'\n save_file(category_file_path, category_data_text)\n logger.debug('File {} created', category_file_path)", "async def category(self,ctx):\n await ctx.send(\"Yes this is a category.\")", "def test_create_category(self):\n pass", "def add_category(self, scheme, term, label):\n 
category = atom.data.Category(scheme=scheme, term=term, label=label)\n self.category.append(category)\n return category", "def _add_icecat_categories(self, data):\n TreeNode = Pool().get('product.tree_node')\n ProductNodeRelationship = Pool().get(\n 'product.product-product.tree_node'\n )\n\n new_node = TreeNode._get_or_create_icecat_if_not_exists(\n int(data.Product.Category.get('ID'))\n )\n\n # add category to product\n ProductNodeRelationship.create([{\n 'product': self,\n 'node': new_node,\n }])", "def test_0005_create_categories(self):\n self.create_category(name='Test 0060 Workflow Features', description='Test 0060 - Workflow Features')", "def add_Category(title,image):\n newCategory=Category.objects.create(title=title, image=image)\n return newCategory", "def create_from_icecat_data(cls, data):\n product = super(Product, cls).create_from_icecat_data(data)\n product._add_icecat_categories(data)\n return product", "def create_category():\n name = request.form.get(\"name\")\n\n if name is not None:\n\n icon = request.files.get(\"icon\")\n\n if icon is not None:\n\n if icon.content_type != \"image/svg+xml\":\n abort(400)\n\n ext = os.path.splitext(icon.filename)[1]\n filename = secure_filename(name.lower() + ext)\n icon.save(os.path.join(app.config[\"UPLOAD_FOLDER\"], filename))\n\n category = Category(name=name)\n category.insert()\n\n response = jsonify(\n {\"success\": True, \"created_category_id\": category.id}\n )\n\n else:\n abort(400)\n\n return response", "def compute_new_wallet_category(self):\n for company_id in self:\n default_wallet_category_id = self.create_new_wallet_category()\n default_wallet_category_id.company_id = company_id\n company_id.default_wallet_category_id = default_wallet_category_id", "def make_test_category(self):\n\n c = Category(slug='test')\n\n return c", "def income_cat(housing):\n logging.info(\"Creating Income Category.....\")\n housing[\"income_cat\"] = pd.cut(\n housing[\"median_income\"],\n bins=[0.0, 1.5, 3.0, 4.5, 6.0, np.inf],\n labels=[1, 2, 3, 4, 5],\n )\n return housing", "def create(self, validated_data):\n breed_data = validated_data.pop('breed').capitalize()\n breed_id, _ = Breed.objects.get_or_create(title=breed_data)\n # validated_data['breed'] = breed_id\n cat = Cat.objects.create(breed=breed_id, **validated_data)\n return cat", "def create_category(self, category_name):\n \n duplicate_check = Category.query.filter_by(name=category_name).first()\n if duplicate_check is not None:\n return\n category = Category(name=category_name, active=True)\n db.session.add(category)\n db.session.commit()", "def EventContentMissionExcelAddCategory(builder, Category):\n return AddCategory(builder, Category)", "def create(self, request):\n current_user = User.objects.get(id=request.user.id)\n if current_user.is_staff:\n category = Category()\n category.label = request.data[\"label\"]\n try:\n category.save()\n serializer = CategorySerializer(category, context={'request' : request})\n return Response(serializer.data, status=status.HTTP_201_CREATED)\n except ValidationError as ex:\n return Response({\"reason\": ex.message}, status=status.HTTP_400_BAD_REQUEST)\n else:\n return Response({'message': \"Only admins can create a category\"},\n status=status.HTTP_401_UNAUTHORIZED\n )", "def test_create_category_with_existing_name(self):\n sample_category()\n res = self.client.post(CATEGORY_URL, {\"name\": \"place\"})\n self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)\n self.assertEqual(\n res.data['errors']['name'][0],\n 'This field must be 
unique.')" ]
[ "0.6914065", "0.67545795", "0.6537486", "0.65007126", "0.6460365", "0.62982243", "0.6271935", "0.6229216", "0.6228962", "0.6087871", "0.5959873", "0.59403336", "0.5935428", "0.5925022", "0.5839894", "0.58252794", "0.58077806", "0.57977724", "0.5763211", "0.5750037", "0.5736662", "0.57215834", "0.5703867", "0.56755245", "0.5649421", "0.56110084", "0.56080467", "0.5566182", "0.5543635", "0.55368936" ]
0.76143277
0
Move a category upwards by one position.
def move_category_up(category: TourneyCategory) -> None:
    category_list = category.party.tourney_categories
    if category.position == 1:
        raise ValueError('Category already is at the top.')
    popped_category = category_list.pop(category.position - 1)
    category_list.insert(popped_category.position - 2, popped_category)
    db.session.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_category_down(category: TourneyCategory) -> None:\n category_list = category.party.tourney_categories\n\n if category.position == len(category_list):\n raise ValueError('Category already is at the bottom.')\n\n popped_category = category_list.pop(category.position - 1)\n category_list.insert(popped_category.position, popped_category)\n\n db.session.commit()", "def move_up(self):\n self.move_step(-1)", "def move_up(self, distance):\r\n return self.move('up', distance)", "def move_up(self):\n\t\treturn self._move(up=True)", "def move_up(self):\n return self._move(up=True)", "def _move_up(self, position):\n if position != self._data.first():\n self._data.add_first(self._data.delete(position))", "def move_up ( self ):\n list, index = self.get_info()\n self.value = (list[:index-1] + [ list[index], list[index-1] ] + \n list[index+1:])", "def move_up(self):\n self.move_measurement(-1)", "def _move_up(self, exclude=None):\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)", "def _move_up(self, position):\n if position != self._data.first(): # consider moving\n cnt = position.element()._count\n walk = self._data.before(position)\n if cnt > walk.element()._count: # must shift forward\n while (walk != self._data.first() and cnt > self._data.before(walk).element()._count):\n walk = self._data.before(walk)\n self._data.add_before(walk, self._data.delete(position)) # delete / reinsert", "def up(self):\n self.move(0, 1)", "def move_up(self):\n\n prev_sibling = self.get_previous_sibling()\n if prev_sibling!=None: \n self.move_to(prev_sibling,'left')\n self.save()", "def _move_up(self, p):\n if p != self._data.first():\n self._data.add_first(self._data.delete(p)) # remove or delete it from initial place and reinsert in new position", "def _move_up(self, p):\n if p != self.data.first():\n self.data.add_first(self.data.delete(p))", "def _move_up(self, p):\n if p == self._data.first():\n count = p.element()._count\n walk = self._data.before(p)\n if count > walk.element()._count: # must shift forward\n while (walk != self._data.first() and\n count > self._data.before(walk).element()._count):\n walk = self._data.before(walk)\n\n self._data.add_before(walk, self._data.delete(p)) # delete/reinsert", "def do_up(self, arg):\r\n moveDirection('up')", "def moveUp(self):\n currentRow = self.getCurrentRow()\n if currentRow > 0:\n rowData = self.removeRow()\n self.insertRow(currentRow - 1, rowData)\n self.layers.insert(currentRow - 1, rowData)\n if currentRow == 1:\n layer = self.layers[0]\n layer.dependType = ''\n self.updateDependLabels()", "def moveCategory(self, categoryName, direction):\n # TODO : NOT TESTED\n if direction == \"left\" or direction == \"down\":\n dir = -1\n if direction == \"right\" or direction == \"up\":\n dir = 1\n\n curCategories = self._loadCategories()\n\n index = curCategories.index(categoryName)\n newindex= index+dir\n if not (0 <= newindex <= len(curCategories)):\n return\n\n itemAtNewIndex = curCategories[newindex]\n\n curCategories[newindex] = categoryName\n curCategories[index] = itemAtNewIndex\n\n self._dumpJson(curCategories, self._pathsDict[\"categoriesFile\"])\n self._categories = curCategories\n return", "def move_up(self):\r\n if self.rect.top > 0:\r\n self.rect.top -= self.speed", "def move_up(self):\n nodes = self.object.nodes or []\n nodes_count = len(nodes)\n\n # no nodes => can't go up\n if not nodes_count:\n raise ParseError(\"Cannot move up, no nodes found\")\n\n if nodes_count == 1: # root node => 'reset' the traversal\n 
self.object.current_node_id = \"\" # :-/\n else:\n pre_node_id = nodes[-2][\"id\"]\n\n # if current node is end node => move up 2 nodes\n if is_pre_end_node(self.graph, pre_node_id) and nodes_count > 2:\n pre_node_id = nodes[-3][\"id\"]\n\n self.object.current_node_id = pre_node_id\n\n self.save(force_update=True)\n\n return self.object", "def move_up(self, request):\n return self._move(True, request)", "def up(self):\n self.forward(MOVE_DISTANCE)", "def move_up(self) -> None:\n try:\n line_start: int = self.buffer.reverse_index('\\n', end=self.index) + 1\n except ValueError:\n return\n\n previous_line_start: int\n try:\n previous_line_start = self.buffer.reverse_index('\\n', end=line_start - 1) + 1\n except ValueError:\n previous_line_start = 0\n\n previous_line_length = line_start - previous_line_start\n column: int = self.index - line_start\n if previous_line_length <= column:\n previous_line_end = line_start - 1\n self.index = previous_line_end\n else:\n self.index = previous_line_start + column", "def move_up(self, step: int = 1) -> None:\n if self.cursor_pos.x == 0:\n self.cursor_pos = Point(self.height - step, self.cursor_pos.y)\n else:\n self.cursor_pos = Point(self.cursor_pos.x-step, self.cursor_pos.y)", "def pos_up(self, y=1):\n\n self.y -= y\n return self.pos(self.x, self.y)", "def move_up(self):\n if self.center.y < (self.screen_height - (self.height / 2)):\n self.center.y += 5", "def move_up(self):\n\n if self.ycor() > 115:\n self.sety(130)\n else:\n new_y = self.ycor() + 40\n self.sety(new_y)", "def move_up(self):\n self.pitch_motor.step_backward()", "def move_up(self, num=1):\n self.position -= num\n if self.moved:\n return self.refresh()\n return u''", "def move_up(self,distance):\n client.moveByVelocityAsync(0, 0, 1, 0.3).join()\n # if self.logging:\n # self.log_arr.append(\"up\")" ]
[ "0.7810339", "0.71507776", "0.70979744", "0.7040622", "0.70031214", "0.700075", "0.6951428", "0.6938323", "0.6885611", "0.6773141", "0.67571497", "0.6626251", "0.65867424", "0.65745205", "0.6459161", "0.64488965", "0.6434402", "0.63261694", "0.6297202", "0.6264247", "0.62206537", "0.6190449", "0.61849827", "0.61709964", "0.61481965", "0.6139032", "0.6103983", "0.60819846", "0.6076897", "0.6074423" ]
0.8217802
0
Move a category downwards by one position.
def move_category_down(category: TourneyCategory) -> None: category_list = category.party.tourney_categories if category.position == len(category_list): raise ValueError('Category already is at the bottom.') popped_category = category_list.pop(category.position - 1) category_list.insert(popped_category.position, popped_category) db.session.commit()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_category_up(category: TourneyCategory) -> None:\n category_list = category.party.tourney_categories\n\n if category.position == 1:\n raise ValueError('Category already is at the top.')\n\n popped_category = category_list.pop(category.position - 1)\n category_list.insert(popped_category.position - 2, popped_category)\n\n db.session.commit()", "def move_down(self):\n self.y -= 1", "def move_down(self, distance):\r\n return self.move('down', distance)", "def move_backward(self, distance):\r\n return self.move('back', distance)", "def move_up(self):\n self.move_step(-1)", "def moveCategory(self, categoryName, direction):\n # TODO : NOT TESTED\n if direction == \"left\" or direction == \"down\":\n dir = -1\n if direction == \"right\" or direction == \"up\":\n dir = 1\n\n curCategories = self._loadCategories()\n\n index = curCategories.index(categoryName)\n newindex= index+dir\n if not (0 <= newindex <= len(curCategories)):\n return\n\n itemAtNewIndex = curCategories[newindex]\n\n curCategories[newindex] = categoryName\n curCategories[index] = itemAtNewIndex\n\n self._dumpJson(curCategories, self._pathsDict[\"categoriesFile\"])\n self._categories = curCategories\n return", "def down(self):\n self.move(0,-1)", "def move_down ( self ):\n list, index = self.get_info()\n self.value = (list[:index] + [ list[index+1], list[index] ] + \n list[index+2:])", "def move_down(self):\n self.move_step(1)", "def move_down(self):\n\t\treturn self._move(up=False)", "def move_down(self):\n return self._move(up=False)", "def move_up(self):\n self.move_measurement(-1)", "def move_up(self, distance):\r\n return self.move('up', distance)", "def move_down(self):\n self.move_measurement(1)", "def move_down(self):\n\n next_sibling = self.get_next_sibling()\n if next_sibling!=None: \n self.move_to(next_sibling,'right')\n self.save()", "def move_down(self):\n if self.center.y > (self.height / 2):\n self.center.y -= 5", "def up(self):\n self.move(0, 1)", "def _move_up(self, position):\n if position != self._data.first():\n self._data.add_first(self._data.delete(position))", "def move_up(self):\n\t\treturn self._move(up=True)", "def move_up ( self ):\n list, index = self.get_info()\n self.value = (list[:index-1] + [ list[index], list[index-1] ] + \n list[index+1:])", "def move_up(self):\n return self._move(up=True)", "def _shift_down(self, idx):\n\n child = (idx + 1) * 2 - 1\n while child < self.size and (\n self.value(idx) < self.value(child) or\n self.value(idx) < self.value(child + 1)):\n # Compare the left child and the right child and get the index of the larger one.\n if self.value(child + 1) > self.value(child):\n child += 1\n self.items[idx], self.items[child] = self.items[child], self.items[idx]\n idx = child\n child = (idx + 1) * 2 - 1", "def _move_up(self, exclude=None):\n self._move(self._pos + _Vec3(0, 1, 0))\n self._place(self._pos + _Vec3(0, -1, 0), exclude)", "def end_category(cls, category):\n if cls._category != category:\n raise ValueError(\n f\"Current category is {cls._category} not {category}\")\n previous = cls._previous.pop()\n if previous is None:\n raise NotImplementedError(\n \"Use stop_category_timing to end the last category\")\n if category != previous:\n cls._change_category(previous)", "def _move_down(self):\n new_pos = self._pos + _Vec3(0, -1, 0)\n block_ = self._get_block(new_pos)\n if block_ != _WATER:\n self._add_to_inv(block_)\n self._move(new_pos)", "def _move_up(self, position):\n if position != self._data.first(): # consider moving\n cnt = position.element()._count\n walk = 
self._data.before(position)\n if cnt > walk.element()._count: # must shift forward\n while (walk != self._data.first() and cnt > self._data.before(walk).element()._count):\n walk = self._data.before(walk)\n self._data.add_before(walk, self._data.delete(position)) # delete / reinsert", "def move_down(self):\n\n if self.ycor() < -280:\n self.sety(-300)\n else:\n new_y = self.ycor() - 40\n self.sety(new_y)", "def move_down(self):\n client.moveByVelocityAsync(0, 0, -1, 0.3).join()\n # if self.logging:\n # self.log_arr.append(\"down\")", "def move_up(self):\n\n prev_sibling = self.get_previous_sibling()\n if prev_sibling!=None: \n self.move_to(prev_sibling,'left')\n self.save()", "def down(self):\n if self.bottom == self.current:\n return\n else:\n self.current -= 1" ]
[ "0.75041527", "0.6625059", "0.65953684", "0.6541835", "0.65137774", "0.6502901", "0.64917004", "0.6411511", "0.63754165", "0.63680816", "0.63534594", "0.634604", "0.6333525", "0.6307063", "0.6192082", "0.6185564", "0.6164168", "0.6129639", "0.60469884", "0.6000192", "0.60001844", "0.59739345", "0.59564537", "0.5952468", "0.5904656", "0.5876304", "0.58610463", "0.5795237", "0.5789386", "0.5788889" ]
0.80289805
0
Set up an interceptor so all grpc calls will have the apikey added on the header, in order to authenticate.
def set_interceptor(self, apikey): self.header_interceptor = \ interceptor.header_adder_interceptor( 'lc-api-key', apikey ) try: self.intercept_channel = grpc.intercept_channel( self.channel, self.header_interceptor) except ValueError as e: raise Exception("Attempted to connect on termninated client, " "channel has been shutdown") from e return lc.LcServiceStub(self.intercept_channel)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def authenticate(self, api_key):\n self.headers['x-rapidapi-key'] = api_key", "def __init__(self, api_key):\n self._api_key = api_key\n self.headers = {\n \"hibp-api-key\": api_key,\n \"user-agent\": \"haveibeenpywned.py\",\n }\n \"\"\"Dict of additional headers required for api calls to the haveibeenpwned.com\n api\"\"\"", "def __init__(self):\n self.headers = {\n 'Authorization': 'Bearer ' + app.config['SLACK_BEARER']\n }", "def __init__(self, api_key=\"\"):\n self.logger = logging.getLogger(__name__)\n self.host_url = 'https://community-api.coinmetrics.io/v2/'\n self.headers = {\"api_key\": api_key} if api_key != '' else {}", "def _addAuthenticationToRequestHeader(request, client):\n request.addAuthorization(client.id, client.secret)", "def requires_key(cls, func):\n\n def wrapped(*args, api_key=None, **kwargs):\n if api_key:\n cls.api_key = api_key\n openai.api_key = cls.api_key\n return func(*args, **kwargs)\n\n return wrapped", "def __set_header(self, header):\n\n if APIKEYHEADER not in header:\n header[APIKEYHEADER] = self.__client.get_apikey()\n if ROUTETAG not in header:\n header[ROUTETAG] = self.__route_tag\n if FABIOROUTETAGHEADER not in header:\n header[FABIOROUTETAGHEADER] = self.__route_tag\n\n return header", "def __init__(self, api_key=None):\n self.session = Session()\n if api_key:\n self.session.headers.update({\n 'X-API-Key': api_key,\n })\n self._load_apis()", "def __init__(self, api_key: str):\n self.session: requests.Session = requests.Session()\n self.session.headers.update({'Authorization': api_key})", "def _build_headers(self, params: Dict) -> None:\n api_key = self._get_query_api_key(params) or self.user_api_key\n if api_key is None:\n raise RedashApiKeyNotProvidedException('No API key provided')\n self.headers = {\"Authorization\": \"Key {}\".format(api_key)}", "def __set_header(self, header):\n\n if APIKEYHEADER not in header:\n header[APIKEYHEADER] = self.__client.get_apikey()\n\n if ROUTETAG not in header:\n header[ROUTETAG] = self.__route_tag\n\n if FABIOROUTETAGHEADER not in header:\n header[FABIOROUTETAGHEADER] = self.__route_tag\n\n return header", "def apply(self, headers):\n headers['Authorization'] = 'Bearer ' + self._metadata_service.auth_token", "def add_headers():\n # the actual access token -\n g.x_tapis_token = request.headers.get('X-Tapis-Token')\n\n # the tenant associated with the subject of the request; used, for instance, when the subject is different\n # from the subject in the actual access_token (for example, when the access_token represents a service account).\n g.x_tapis_tenant = request.headers.get('X-Tapis-Tenant')\n\n # the user associated with the subject of the request. Similar to x_tapis_tenant, this is used, for instance, when\n # the subject is different from the subject in the actual access_token (for example, when the access_token\n # represents a service account).\n g.x_tapis_user = request.headers.get('X-Tapis-User')\n\n # a hash of the original user's access token. 
this can be used, for instance, to check if the original user's\n # access token has been revoked.\n g.x_tapis_user_token_hash = request.headers.get('X-Tapis-User-Token-Hash')", "def _build_common_headers(apikey: str):\n return {\n \"Authorization\": f\"token {apikey}\",\n \"User-Agent\": \"sharing-api-fetcher\",\n \"Accept-Encoding\": \"gzip\",\n \"Accept\": \"application/json\",\n }", "def __call__(self, request):\n self._logger.debug(f'__call__, {request.url} adding Authorization header')\n request.headers[\"Authorization\"] = self._get_auth_value()\n request.register_hook(\"response\", self._handle_401)\n return request", "def authenticate_header(self, request):\n return \"Api key authentication failed.\"", "def __init__(self, **kwargs):\n\n builder_kwargs = {}\n\n if \"token\" in kwargs and str(kwargs[\"token\"]) != \"None\":\n\n # If there is a token use it along with the specified proxy details if specified\n config = ApiConfiguration(\n api_url=kwargs.get(\"api_url\", None),\n certificate_filename=kwargs.get(\"certificate_filename\", None),\n proxy_config=ProxyConfig(\n address=kwargs.get(\"proxy_url\", None),\n username=kwargs.get(\"proxy_username\", None),\n password=kwargs.get(\"proxy_password\", None),\n ) if kwargs.get(\"proxy_url\", None) is not None else None,\n app_name=kwargs.get(\"app_name\", None)\n )\n\n builder_kwargs[\"api_configuration\"] = config\n builder_kwargs[\"token\"] = kwargs[\"token\"]\n\n # Otherwise use a secrets file if it exists\n builder_kwargs[\"api_secrets_filename\"] = kwargs.get(\"api_secrets_filename\", None)\n\n # add the correlation id if specified\n builder_kwargs[\"correlation_id\"] = kwargs.get(\"correlation_id\", None)\n\n # add the id provider response handler if specified\n builder_kwargs[\"id_provider_response_handler\"] = kwargs.get(\"id_provider_response_handler\", None)\n\n builder_kwargs[\"tcp_keep_alive\"] = kwargs.get(\"tcp_keep_alive\", False)\n\n # Call the client builder, this will result in using either a token, secrets file or environment variables\n self.api_client = ApiClientBuilder.build(**builder_kwargs)", "def add_header(response):\n response.headers['Authorization'] = response\n return response", "def set_api_key(self, api_key):\n self.api_key = api_key\n self.session.auth = (\"api\", api_key)", "def set_auth_headers(self, access_token, client_id):\n\t\tself.headers['X-Udemy-Bearer-Token'] = access_token\n\t\tself.headers['X-Udemy-Client-Id'] = client_id\n\t\tself.headers['Authorization'] = \"Bearer \" + access_token\n\t\tself.headers['X-Udemy-Authorization'] = \"Bearer \" + access_token", "def __call__(self, request):\n request.headers['Authorization'] = f'Token {self.token}'\n return request", "def __call__(self, resp):\r\n if not self.auth_token:\r\n self.auth()\r\n resp.register_hook('response', self.handle_error)\r\n resp.headers['X-Auth-Token'] = self.auth_token\r\n return resp", "def __init__(self, api_key):\n self._api_key = api_key\n self._get_params = urllib.urlencode({'subscription-key': api_key})\n self._headers = {\n # Basic Authorization Sample \n # 'Authorization': 'Basic %s' % base64.encodestring('{username}:{password}'),\n }", "def __init__(self, api_key, api_name):\n self.api_key = api_key\n self.api_name = api_name\n self.raven_client = get_raven_client()\n self.stats_client = get_stats_client()", "def set_apikey(self, apikey):\n self.apikey = apikey\n self.__init_submodules(apikey)", "def __init__(self, app_id, api_key):\r\n self.apiroot = 'https://api.intercom.io/v1'\r\n\r\n 
self.add_filter(auth.BasicAuth(app_id, api_key))\r\n self.add_filter(self.use_json)", "def __init__(self, api_key):\r\n self.apiroot = 'http://apps.compete.com'\r\n\r\n self.api_key = api_key\r\n self.add_filter(self.add_api_key)", "def __init__(self, authtoken, organization_id):\n self.headers = {\n 'Authorization': 'Zoho-oauthtoken ' + authtoken,\n }\n self.details = {\n 'organization_id': organization_id\n }", "def apply(cls, http_request, Configuration):\n # If this is API Key based authentication, we add the apiKey header\n if Configuration.api_key is not None:\n http_request.headers['apikey'] = Configuration.api_key\n return\n\n # If this is SessionId based authentication, we add the session-id header\n if Configuration.session_id is not None:\n http_request.headers['session-id'] = Configuration.session_id\n return\n\n # If this is Open-Id based authentication, we add the open-id-token header\n if Configuration.open_id_token is not None:\n http_request.headers['open-id-token'] = Configuration.open_id_token\n return\n\n cls.check_auth(Configuration)\n token = Configuration.auth_token.access_token\n token_type = Configuration.auth_token.token_type\n http_request.headers['Authorization'] = token_type+\" \"+token", "def SetAPIKey(self, api_key):\n self._api_key = api_key" ]
[ "0.6686443", "0.63640404", "0.59382933", "0.5867886", "0.5780181", "0.5739476", "0.57209116", "0.57013947", "0.56940454", "0.569231", "0.56693566", "0.56687033", "0.5631621", "0.56137", "0.5556618", "0.55513924", "0.5533995", "0.5528315", "0.55266106", "0.5516029", "0.5476392", "0.546727", "0.5437554", "0.5429208", "0.5423389", "0.54183257", "0.5414966", "0.5401217", "0.5398615", "0.539425" ]
0.79679054
0
returning all elements names from ``ImmunizationRecommendation`` according specification, with preserving original sequence order.
def elements_sequence(cls): return [ "id", "meta", "implicitRules", "language", "text", "contained", "extension", "modifierExtension", "identifier", "patient", "date", "authority", "recommendation", ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def itemnames():\n g = ['KIS_NA_39', 'VII_57', 'MX_48', 'MX_56', 'KIS_NA_42', 'VII_54',\n 'MX_S_48', 'MX_S_52', 'MX_52', 'KIS_NA_45', 'KIS_NA_51', 'MIP_45',\n 'MIP_49', 'MIP_52', 'MIP_plus_48', 'MIP_plus_51', 'MX_42', 'MX_45',\n 'MIP_G_42', 'KIS_42', 'KIS_NA_48']\n return(g)", "def elements_sequence(cls):\n return [\"id\", \"extension\", \"modifierExtension\", \"indication\", \"dosingGuideline\"]", "def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"code\",\n \"status\",\n \"author\",\n \"intendedJurisdiction\",\n \"name\",\n \"relatedMedicationKnowledge\",\n \"associatedMedication\",\n \"productType\",\n \"monograph\",\n \"preparationInstruction\",\n \"cost\",\n \"monitoringProgram\",\n \"indicationGuideline\",\n \"medicineClassification\",\n \"packaging\",\n \"clinicalUseIssue\",\n \"storageGuideline\",\n \"regulatory\",\n \"definitional\",\n ]", "def elements_sequence(cls):\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"regulatoryAuthority\",\n \"substitution\",\n \"schedule\",\n \"maxDispense\",\n ]", "def elements_sequence(cls):\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"definition\",\n \"doseForm\",\n \"intendedRoute\",\n \"ingredient\",\n \"drugCharacteristic\",\n ]", "def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"instantiatesCanonical\",\n \"instantiatesUri\",\n \"basedOn\",\n \"priorRequest\",\n \"groupIdentifier\",\n \"status\",\n \"intent\",\n \"priority\",\n \"codeReference\",\n \"codeCodeableConcept\",\n \"parameter\",\n \"subject\",\n \"encounter\",\n \"occurrenceDateTime\",\n \"occurrencePeriod\",\n \"occurrenceTiming\",\n \"authoredOn\",\n \"requester\",\n \"performerType\",\n \"performer\",\n \"reasonCode\",\n \"reasonReference\",\n \"insurance\",\n \"supportingInfo\",\n \"note\",\n \"relevantHistory\",\n ]", "def get_names(self):\n return [doc['name'] for doc in self.vocab]", "def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"basedOn\",\n \"status\",\n \"category\",\n \"code\",\n \"subject\",\n \"context\",\n \"effectiveDateTime\",\n \"effectivePeriod\",\n \"issued\",\n \"performer\",\n \"specimen\",\n \"result\",\n \"imagingStudy\",\n \"image\",\n \"conclusion\",\n \"codedDiagnosis\",\n \"presentedForm\",\n ]", "def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"instantiatesCanonical\",\n \"instantiatesUri\",\n \"basedOn\",\n \"replaces\",\n \"groupIdentifier\",\n \"status\",\n \"intent\",\n \"priority\",\n \"code\",\n \"subject\",\n \"encounter\",\n \"authoredOn\",\n \"author\",\n \"reason\",\n \"goal\",\n \"note\",\n \"action\",\n ]", "def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"basedOn\",\n \"partOf\",\n \"status\",\n \"patient\",\n \"type\",\n \"suppliedItem\",\n \"occurrenceDateTime\",\n \"occurrencePeriod\",\n \"occurrenceTiming\",\n \"supplier\",\n \"destination\",\n \"receiver\",\n ]", "def elements_sequence(cls):\n 
return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"issued\",\n \"applies\",\n \"type\",\n \"subType\",\n \"topic\",\n \"action\",\n \"actionReason\",\n \"securityLabel\",\n \"agent\",\n \"text\",\n \"valuedItem\",\n \"group\",\n ]", "def elements_sequence(cls):\n return [\"id\", \"extension\", \"modifierExtension\", \"role\", \"actor\"]", "def elements_sequence(cls):\n return [\"id\", \"extension\", \"modifierExtension\", \"type\", \"name\"]", "def elements_sequence(cls):\n return [\"id\", \"extension\", \"modifierExtension\", \"actor\", \"role\"]", "def elements_sequence(cls):\n return [\"id\", \"extension\", \"modifierExtension\", \"actor\", \"role\"]", "def elements_sequence(cls):\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"example\",\n \"name\",\n \"description\",\n \"acronym\",\n \"sourceUri\",\n \"sourceReference\",\n \"exampleFor\",\n ]", "def elementNames(self):\n nel = self.nElements()\n return map(self.elementName,range(nel))", "def elements_sequence(cls):\n return [\"id\", \"extension\", \"modifierExtension\", \"function\", \"actor\"]", "def elements_sequence(cls):\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"treatmentIntent\",\n \"dosage\",\n \"administrationTreatment\",\n \"patientCharacteristic\",\n ]", "def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"url\",\n \"identifier\",\n \"version\",\n \"name\",\n \"title\",\n \"shortTitle\",\n \"subtitle\",\n \"status\",\n \"date\",\n \"description\",\n \"note\",\n \"useContext\",\n \"publisher\",\n \"contact\",\n \"author\",\n \"editor\",\n \"reviewer\",\n \"endorser\",\n \"relatedArtifact\",\n \"actual\",\n \"characteristicCombination\",\n \"characteristic\",\n \"handling\",\n \"category\",\n ]", "def elements_sequence(cls):\n return [\"id\", \"extension\", \"modifierExtension\", \"cost\", \"packagedProduct\"]", "def names(self):\n return list(item.name for item in self.mechanisms)", "def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"definitionUri\",\n \"definitionCanonical\",\n \"status\",\n \"partOf\",\n \"code\",\n \"subject\",\n \"context\",\n \"occurrenceDateTime\",\n \"occurrencePeriod\",\n \"occurrenceTiming\",\n \"performer\",\n \"performingOrganization\",\n \"requestingOrganization\",\n \"costCenter\",\n \"quantity\",\n \"bodysite\",\n \"factorOverride\",\n \"priceOverride\",\n \"overrideReason\",\n \"enterer\",\n \"enteredDate\",\n \"reason\",\n \"service\",\n \"productReference\",\n \"productCodeableConcept\",\n \"account\",\n \"note\",\n \"supportingInformation\",\n ]", "def get_names(self):\n selected_masks = self._component_obj.get_support()\n return [feature_name for (selected, feature_name) in zip(selected_masks, self.input_feature_names) if selected]", "def names(self) -> list[str]:", "def elements_sequence(cls):\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"title\",\n \"requirement\",\n \"relatedData\",\n ]", "def elements_sequence(cls):\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"title\",\n \"requirement\",\n \"relatedData\",\n ]", "def elements_sequence(cls):\n return [\n \"id\",\n \"extension\",\n \"modifierExtension\",\n \"description\",\n \"definitionReference\",\n \"definitionCanonical\",\n \"definitionCodeableConcept\",\n 
\"definitionExpression\",\n \"method\",\n \"device\",\n \"exclude\",\n \"timeFromStart\",\n \"groupMeasure\",\n ]", "def get_seq_names(self) -> List[str]:\n return [seq.Name.lower() for seq in self.Sequencers]", "def elements_sequence(cls):\n return [\n \"id\",\n \"meta\",\n \"implicitRules\",\n \"language\",\n \"text\",\n \"contained\",\n \"extension\",\n \"modifierExtension\",\n \"identifier\",\n \"status\",\n \"issued\",\n \"applies\",\n \"subject\",\n \"topic\",\n \"authority\",\n \"domain\",\n \"type\",\n \"subType\",\n \"action\",\n \"actionReason\",\n \"decisionType\",\n \"contentDerivative\",\n \"securityLabel\",\n \"agent\",\n \"signer\",\n \"valuedItem\",\n \"term\",\n \"bindingAttachment\",\n \"bindingReference\",\n \"bindingReference\",\n \"bindingReference\",\n \"friendly\",\n \"legal\",\n \"rule\",\n ]" ]
[ "0.56782556", "0.56028837", "0.5595492", "0.5593947", "0.55300343", "0.5521369", "0.55187535", "0.55180925", "0.5463728", "0.54546034", "0.54065096", "0.538096", "0.538058", "0.5376563", "0.5376563", "0.53717583", "0.5366831", "0.53619134", "0.5345186", "0.53447956", "0.53373325", "0.53278285", "0.5320872", "0.53051555", "0.5302597", "0.53008604", "0.53008604", "0.5285287", "0.528415", "0.5266382" ]
0.56116945
1
Callback when front server is connected.
def OnFrontConnected(self) -> None: self.gateway.write_log("行情服务器连接成功") self.login()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def on_connect(self):\n print('Client connected!')", "def on_connect():\n print(\"User connected!\")", "def connected(self):\n manager = self.manager()\n self.log().debug(\"Register [%s] callbacks\", self.name())\n\n manager.subscribeServerCallbacks(self, self.cfg().chatimg.servers or manager.SERVERS_ALL)", "def on_connect(self, client, userdata, flags, rc):\n\n\t\tself.subscribe(\"system\")\n\t\tprint (\"[{}] Client connected\".format(\n\t\t\tint(time.time())\n\t\t))", "def client_connected(data):\n print('a client connected')\n emit('queue_changed', queue.serialize())\n history = queue.instantiate_history()\n if len(history) > 0:\n song_data = history[-1]\n emit('mid_currently_playing', song_data)\n if cache.get('is_paused').decode('utf-8') == 'True':\n pause_time = int(cache.get('paused_time').decode('utf-8'))\n socketio.emit('paused', pause_time)", "async def on_connected(self):\n self._connected = True", "def on_connect(self):\n log.info(\"Stream connected\")", "def on_connection_start(self) -> None:\r\n print(\r\n \"Connected with: {}:{}\\n\".format(\r\n self.connection_info[\"host\"], self.connection_info[\"port\"]\r\n )\r\n )", "def on_connect( client, userdata, flag, rc ):\n if ( rc == 0 ):\n client.connected_flag = True\n logging.info( \"Connected to Broker! Returned code: %s\\n\" %rc )\n else:\n logging.info( \"Failed to connect. Returned code: %s\\n\" %rc )", "def slot_client_connected(self, _sender, _data):\r\n self.check_connect_ready()", "async def on_connect(self):\n pass", "def on_connect():\n print('User connected!')\n return'connected'", "def on_connect(client, userdata, flags, rc):\n print(\"Connected with with mqtt server: \" + str(rc))\n client.subscribe(\"clients/#\")", "def on_connect():\n logger.info(f\"{request.sid} Connected\")", "def on_server_start(self, server):\n pass", "async def on_connect(self) -> None:", "def on_server_start(self):\n raise NotImplementedError", "def on_connect(client, userdata, flags, rc):\n if rc == 0:\n print(\"Connected to broker\")\n client.connected_flag = True\n else:\n print(\"Connection failed\")\n client.connected_flag = False", "def callback_connect(self):\n pass", "def callback_connect(self):\n pass", "def callback_connect(self):\n pass", "def on_connect(client, userdata, flags, return_code):\n\n if return_code != 0:\n print(\"Connected with result code: \", str(return_code))\n else:\n client.connected_flag=True\n client.publish(status_topic, \"Online\", retain=True)", "def onConnect(self, request_or_response):", "def on_connect(self, userdata, flags, rc):\n logging.info(f'Connected with Mosquitto Server: (code) {rc}')", "def on_start(self):\n ProxyServerHandler.current.handler_ready(self)", "def connect():\n logging.info('Client connected')", "def on_connect(client):\n logging.info(\"Opened connection to %s\" % client.addrport() )\n\n state.set_client_list(client)\n state.initialize_client_state(client)\n client.send(\"\")\n client.send(\"Welcome to the %s Server, %s.\\n\" % (PRODUCT_NAME, client.addrport()) )\n client.send(\"Enter your user_id, or type \\\"enroll\\\" to create a new account: \")", "def clientConnected(self):\n self.running = True\n for d in self._callWhenReady:\n d.callback(None)", "def onConnect(self, response):\n\t\tif DEBUG:\n\t\t\tsys.stdout.write(\n\t\t\t\t\"Connection established. Response: '{r}'.\\n\".format(r=response)\n\t\t\t\t)\n\t\t# we need to tell the client-object that this is the connection\n\t\tself.factory.root.client = self", "def on_session_started():\n #print(\"on_session_started\")" ]
[ "0.7463222", "0.69432336", "0.686189", "0.6838079", "0.67208254", "0.66620857", "0.66216904", "0.658405", "0.6554587", "0.6540486", "0.64954203", "0.6479373", "0.64791733", "0.6423165", "0.64106476", "0.6404848", "0.64022064", "0.6392826", "0.63813555", "0.63813555", "0.63813555", "0.63579476", "0.63478446", "0.6328573", "0.6324107", "0.63224375", "0.6271612", "0.62385094", "0.62299025", "0.6214332" ]
0.74887276
1
Factory to make list of HeadingProduct objects from a list of Product objs. Works the same way as award.awards_list.make_list()
def make_list(products): heading_products = [] genres = set([p.genre for p in products]) for genre in genres: this_heading_product = HeadingProduct(genre, products) if len(this_heading_product.products): heading_products.append(this_heading_product) return heading_products
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_list(cls, *args):\n return _create_list(cls, *args)", "def create_list(cls, *args):\n return _create_list(cls, *args)", "def test_createGlossaryByList(self):\n li = []\n li.append(['term', 'tags', 'value'])\n li.append(['foo', 'a', '1'])\n li.append(['bar', 'a, b', '2'])\n li.append(['gnark', 'a, c', '3'])\n self.g = glossary.Glossary(li)", "def generate_products(num_products=30):\n products = []\n for item in range(0, num_products):\n gen_name = str(random.choice(ADJECTIVES) + \" \" + random.choice(NOUNS))\n price = random.uniform(5, 100)\n weight = random.uniform(5, 100)\n flammability = random.uniform(0.0, 2.5)\n products.append(Product(name=gen_name,\n price=price, weight=weight,\n flammability=flammability))\n return products", "def generate_products(num_products=30):\r\n products = []\r\n for i in range(num_products):\r\n name = sample(ADJECTIVES, 1)[0] + ' ' + sample(NOUNS, 1)[0]\r\n price = randint(5, 100)\r\n weight = randint(5, 100)\r\n flammability = uniform(0.0, 2.5)\r\n products.append(Product(name, price=price, weight=weight,\r\n flammability=flammability))\r\n return products", "def generate_products():\n # initialize list of noun and adj\n num_products = 30\n products = [0] * num_products\n prices = [0] * num_products\n weights = [0] * num_products\n flammabilities = [0] * num_products\n\n # initlize random word object\n random = RandomWords()\n\n adj = [random.get_random_word(includePartOfSpeech=\"adjective\")\n for product in products]\n noun = [random.get_random_word(includePartOfSpeech=\"noun\")\n for product in products]\n products = [noun + \" \" + adj for noun, adj in zip(adj, noun)]\n\n prices = [random.randint(5, 100) for price in prices]\n weights = [random.randint(5, 100) for weight in weights]\n flammabilities = [random.randint(0.0, 2.5)\n for flammability in flammabilities]\n\n return products, prices, weights, flammabilities", "def get_products(self):\n\n lst = []\n for product in self.products.findall('product'):\n id = product.find('id').text\n name = product.find('name').text\n dispensary_id = product.find('dispensary_id').text\n dispensary_name = product.find('dispensary_name').text\n canabis_brand = product.find('canabis_brand').text\n canabis_strain = product.find('canabis_strain').text\n category = product.find('category').text\n subcategory = product.find('subcategory').text\n thc_level = product.find('thc_level').text\n cbd_level = product.find('cbd_level').text\n cbn_level = product.find('cbn_level').text\n thc_level_type = product.find('thc_level_type').text\n cbd_level_type = product.find('cbd_level_type').text\n cbn_level_type = product.find('cbn_level_type').text\n\n description = product.find('description').text\n created_at = product.find('created_at').text\n updated_at = product.find('updated_at').text\n\n prices = []\n urls = []\n images = []\n\n for child in product:\n if child.tag == 'prices':\n for cost in child.findall('cost'):\n prices.append(Price(cost.attrib['unit'], cost.text))\n\n if child.tag == 'urls':\n admin = child.find('admin').text\n public = child.find('public').text\n urls.append(UrlInfo(admin, public))\n\n if child.tag == 'images':\n for image in child.findall('image'):\n images.append(Image(image.attrib['main'], image.text,))\n\n lst.append(Product(id, name, dispensary_id, dispensary_name,\n canabis_brand, canabis_strain,\n category, subcategory, thc_level, cbd_level,\n cbn_level, thc_level_type, cbd_level_type,\n cbn_level_type, prices, urls, images,\n description, created_at, updated_at))\n\n return lst", 
"def create(cls, vlist):\n IndexBacklog = Pool().get('elasticsearch.index_backlog')\n Product = Pool().get('product.product')\n\n templates = super(Template, cls).create(vlist)\n products = []\n for template in templates:\n products.extend([Product(p) for p in template.products])\n IndexBacklog.create_from_records(products)\n return templates", "def _create_list_of_mock_iam_resources():\n policy_resources = []\n for data in fasd.IAM_POLICY_RESOURCES:\n policy = mock.MagicMock()\n policy.data = json.dumps(data['iam_policy'])\n policy.parent = mock.MagicMock()\n policy.parent.type = data['parent_type']\n policy.parent.name = data['parent_name']\n policy.parent.full_name = data['parent_full_name']\n policy_resources.append(policy)\n return policy_resources", "def create(cls, vlist):\n IndexBacklog = Pool().get('elasticsearch.index_backlog')\n\n products = super(Product, cls).create(vlist)\n IndexBacklog.create_from_records(products)\n return products", "def fill_item_list(self):\n return_list = []\n with Transaction().start(DBNAME, 1):\n self.productlist = self.Product.search([('description', '=', 'Stock'), ('type', '=', 'goods')])\n for i in self.productlist:\n return_list.append(i.template.name)\n return return_list", "def generate_products(n=30, price_range=(5, 10), weight_range=(5, 100)):\n products = []\n for i in range(1, n + 1):\n name = random.choice(ADJECTIVES) + ' ' + random.choice(NOUNS)\n price = random.randrange(price_range[0], price_range[1] + 1)\n weight = random.randrange(weight_range[0], weight_range[1] + 1)\n flammability = random.uniform(0.0, 2.5)\n product = Product(name, price, weight, flammability)\n products.append(product)\n return products", "def object_from_list(entry):\n return [DataModel(**item) for item in entry]", "def from_list(cls, rowslist, shape, domain):\n return cls(rowslist, shape, domain)", "def _listProduct(self,lol,anch):#a recursive list product taker\n\t\ttry:\n\t\t\tif str(type(lol[0])) != \"<type 'list'>\":\n\t\t\t\traise IndexError\n\t\t\tself._listProduct(utils.xproduct(lol.pop(0),lol.pop(0))+lol,anch)\n\t\texcept IndexError:\n\t\t\tanch.extend(lol)", "def _build_itemized_description_table0(products: typing.List[Product] = []):\n table_001 = FixedColumnWidthTable(number_of_rows=15, number_of_columns=4)\n for h in [\"Ensemble Name\", \"Classification\", \"Info\", \"Fee\"]:\n table_001.add(\n TableCell(\n Paragraph(h, font_color=X11Color(\"White\")),\n background_color=HexColor(\"0b3954\"),\n )\n )\n\n odd_color = HexColor(\"f4f3f3\")\n even_color = HexColor(\"FFFFFF\")\n \n for row_number, item in enumerate(products):\n c = even_color if row_number % 2 == 0 else odd_color\n table_001.add(TableCell(Paragraph(item.name), background_color=c))\n table_001.add(TableCell(Paragraph(str(item.quantity)), background_color=c))\n table_001.add(\n TableCell(Paragraph(\"$ \" + str(item.price_per_sku)), background_color=c)\n )\n table_001.add(\n TableCell(\n Paragraph(\"$ \" + str(item.quantity * item.price_per_sku)),\n background_color=c,\n )\n )\n\n # Optionally add some empty rows to have a fixed number of rows for styling purposes\n for row_number in range(len(products), 10):\n c = even_color if row_number % 2 == 0 else odd_color\n for _ in range(0, 4):\n table_001.add(TableCell(Paragraph(\" \"), background_color=c))\n\n # subtotal\n subtotal: float = sum([x.price_per_sku * x.quantity for x in products])\n table_001.add(\n TableCell(\n Paragraph(\n \"Subtotal\",\n font=\"Helvetica-Bold\",\n horizontal_alignment=Alignment.RIGHT,\n ),\n col_span=3,\n )\n )\n 
table_001.add(\n TableCell(Paragraph(\"$ 1,180.00\", horizontal_alignment=Alignment.RIGHT))\n )\n\n # discounts\n table_001.add(\n TableCell(\n Paragraph(\n \"Discounts\",\n font=\"Helvetica-Bold\",\n horizontal_alignment=Alignment.RIGHT,\n ),\n col_span=3,\n )\n )\n table_001.add(TableCell(Paragraph(\"$ 0.00\", horizontal_alignment=Alignment.RIGHT)))\n\n # taxes\n taxes: float = subtotal * 0.06\n table_001.add(\n TableCell(\n Paragraph(\n \"Taxes\", font=\"Helvetica-Bold\", horizontal_alignment=Alignment.RIGHT\n ),\n col_span=3,\n )\n )\n table_001.add(\n TableCell(Paragraph(\"$ \" + str(taxes), horizontal_alignment=Alignment.RIGHT))\n )\n\n # total\n total: float = subtotal + taxes\n table_001.add(\n TableCell(\n Paragraph(\n \"Total\", font=\"Helvetica-Bold\", horizontal_alignment=Alignment.RIGHT\n ),\n col_span=3,\n )\n )\n table_001.add(\n TableCell(Paragraph(\"$ \" + str(total), horizontal_alignment=Alignment.RIGHT))\n )\n table_001.set_padding_on_all_cells(Decimal(2), Decimal(2), Decimal(2), Decimal(2))\n table_001.no_borders()\n return table_001", "def create_list(self, args, l_type):\n\n scraper_types = [\n \"subreddit\",\n \"redditor\",\n \"comments\"\n ]\n\n index = scraper_types.index(l_type)\n item_list = [item[0] for item in self._list_switch(args, index)]\n\n return item_list", "def create_products():\n try:\n # Instantiate the class and separate objects into two lists\n challenge = Challenge()\n # Get all products\n product_base = challenge.get_products(\"product_groups.json\")\n # Divide the products into independent (no parent) and dependent (with parents)\n independent, dependent = challenge.filter_products(product_base)\n if not challenge.save_independent_products(independent):\n Exception(\"Function save_independent_products() couldn't complete\")\n\n if not challenge.save_dependent_products(\n dependent, product_base, len(independent)\n ):\n raise Exception(\"Function save_dependent_products() couldn't complete\")\n\n except Exception as err:\n logging.error(f\"[ERROR] While processing the objects. 
Traceback: {err}\")\n return False\n else:\n return True", "def __init__(self): \n self.products_list = []", "def refine_product_headers(product, total_obj_list):\n hdu, closefits = _process_input(product)\n phdu = hdu[0].header\n # Insure rootname and filename keywords matches actual filename\n phdu['rootname'] = '_'.join(product.split('_')[:-1])\n phdu['filename'] = product\n\n # Determine level of the product\n level = 1 if len(phdu['rootname'].split('_')[-1]) > 6 else 2\n\n # Update PINAME keyword\n phdu['piname'] = phdu['pr_inv_l']\n\n # Start by updating the S_REGION keyword.\n compute_sregion(hdu)\n\n # Compute numexp as number of exposures NOT chips\n input_exposures = list(set([kw[1].split('[')[0] for kw in phdu['d*data'].items()]))\n if level == 1:\n ipppssoots = [fname.split('_')[0] for fname in input_exposures]\n phdu['ipppssoo'] = ';'.join(ipppssoots)\n phdu['numexp'] = len(input_exposures)\n\n # Convert dates to ISO format\n phdu['date-beg'] = (Time(phdu['expstart'], format='mjd').iso, \"Starting Date and Time\")\n phdu['date-end'] = (Time(phdu['expend'], format='mjd').iso, \"Ending Date and Time\")\n\n phdu['equinox'] = hdu[('sci', 1)].header['equinox'] if 'equinox' in hdu[('sci', 1)].header else 2000.0\n\n # Re-format ACS filter specification\n if phdu['instrume'] == 'ACS':\n phdu['filter'] = get_acs_filters(hdu, delimiter=';')\n\n # Insure PHOT* keywords are always in SCI extension\n for pkw in PHOT_KEYWORDS:\n if pkw in phdu:\n hdu[('sci', 1)].header[pkw] = (phdu[pkw], phdu.cards[pkw].comment)\n del phdu[pkw]\n\n # Apply any additional inputs to drizzle product header\n if level:\n hdu[0].header['haplevel'] = (level, \"Classification level of this product\")\n\n # Reset filter specification for total detection images which combine filters\n if 'total' in phdu['rootname']:\n phdu['filter'] = 'detection'\n\n # Build HAP table\n # if 'total' in product: level = 3\n update_hdrtab(hdu, level, total_obj_list, input_exposures)\n\n # close file if opened by this function\n if closefits:\n hdu.close()", "def __construct_bill_lineitems(bill_lineitems: List[BillLineitem]) -> List[Dict]:\n lines = []\n\n for line in bill_lineitems:\n line = {\n 'Description': line.description,\n 'DetailType': 'AccountBasedExpenseLineDetail',\n 'Amount': line.amount,\n 'AccountBasedExpenseLineDetail': {\n 'AccountRef': {\n 'value': line.account_id\n },\n 'CustomerRef': {\n 'value': line.customer_id\n },\n 'ClassRef': {\n 'value': line.class_id\n }\n }\n }\n lines.append(line)\n\n return lines", "def ConstructList(title, items):\n buf = cStringIO.StringIO()\n fmt = 'list[title=\"{title}\",always-display-title]'.format(title=title)\n resource_printer.Print(sorted(set(items)), fmt, out=buf)\n return buf.getvalue()", "def _build_itemized_description_table(products: typing.List[Product] = []):\n numrows = len(products)\n table_001 = FlexibleColumnWidthTable(number_of_rows=numrows, number_of_columns=3) \n table_001.add(\n TableCell(\n Paragraph(\"Ensemble Name\", font_color=X11Color(\"White\")),\n background_color=HexColor(\"0b3954\"),\n preferred_width=Decimal(256),\n )\n )\n table_001.add(\n TableCell(\n Paragraph(\"Classification\", font_color=X11Color(\"White\")),\n background_color=HexColor(\"0b3954\"),\n preferred_width=Decimal(128),\n )\n )\n table_001.add(\n TableCell(\n Paragraph(\"Fee\", font_color=X11Color(\"White\")),\n background_color=HexColor(\"0b3954\"),\n preferred_width=Decimal(64),\n )\n )\n \n return table_001", "def test_create_obj_by_type_from_list(self):\n test_obj = 
[mock.MagicMock(), \"test_atrt\", {}]\n returned_obj = self.tested_class._create_obj_by_type(test_obj)\n self.assertIsInstance(returned_obj, list)\n self.assertIs(returned_obj[0], test_obj[0])\n self.assertEqual(returned_obj[1], test_obj[1])\n self.assertIsInstance(returned_obj[2], self.tested_class)", "def from_list(cls, ticker_list, start, end, get_ohlcv=False,\n get_fundamentals=False):\n\n if get_fundamentals:\n cls._init_spiders(ticker_list=ticker_list, start_date=start,\n end_date=end)\n\n with db.transactional_session() as session:\n for ticker in ticker_list:\n session.add(cls(ticker=ticker, start_date=start, end_date=end,\n get_ohlcv=get_ohlcv,\n get_fundamentals=get_fundamentals))", "def p():\n args = {'product_id' : 1, 'sku': 'abc', 'upc': 'def',\n 'name' : 'hello', 'description' : 'xfsef', \n 'category1' : 'sdfds', 'category2' : 'dsfssaa',\n 'storage' : 'afas', 'keywords' : '32423ssdf', \n 'quantity' : 3240, 'price': 23234, 'item_weight' : 23423,\n 'item_weight_unit' : 'aefewa', 'item_volume' : 12.3,\n 'item_volume_unit' : 'sfds4', 'expiry_date': '02/02/20', \n 'items_per_case' : 2343, \n 'case_wt' : 324234, 'case_wt_unit' : 'safa', 'case_dim' : '3ags',\n 'case_dim_unit' : 'sdfs', 'photo1' : 'sdfsf34', 'photo2' : 'sdfgs',\n 'photo3' : 'sdgfsdrf', 'created' : '2020-01-02 34:23:34', \n 'last_updated' : '2024-34-34 34.12.34' }\n return Product(**args)", "def products(self):\r\n return Products(self)", "def construct(cls, obs_lists, platform_id):\n step = 0\n LookupTable = []\n while step < obs_lists.shape[0]:\n K = str(int(obs_lists[step, 0]))\n LookupTable.append(BaseCreateFactory(K, platform_id).create_object())\n step += 1\n return LookupTable", "def from_list(cls, lst):\n return cls(lst[0], lst[1], lst[2])", "def create(data):\n \n return Partlist(\n list_id = data['id'],\n name = data['name'],\n pieces = data['num_parts'])" ]
[ "0.56937706", "0.56937706", "0.561895", "0.5307927", "0.52138203", "0.5196834", "0.51320314", "0.5119527", "0.50780755", "0.5066703", "0.50650996", "0.5048959", "0.5017417", "0.49839976", "0.49739638", "0.49701428", "0.4967386", "0.49507985", "0.48988718", "0.4894856", "0.4892113", "0.48866087", "0.4857621", "0.48575592", "0.48546147", "0.48474967", "0.4843721", "0.48231575", "0.4815388", "0.4814931" ]
0.7035925
0
same approach as the to_markup_dict() method on Product
def to_markup_dict(self, markup): ret = self.to_dict() ret["markup"] = markup.make(self.to_dict()) return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _format_product(self, product):\n formatted = {}\n variants = []\n formatted['id'] = product.get('id')\n formatted['name'] = product.get('name')\n formatted['in_stock'] = True if product.get('stock_status') == 'instock' else False\n # Variants (Includes prices here since variants can be different prices)\n for variant in product.get('variations', []):\n formatted_variant = self._format_variant(variant)\n variants.append(formatted_variant)\n formatted['variants'] = variants\n # Options\n if product.get('attributes'):\n options = [{attribute.get('name').lower(): attribute.get('options')} for attribute in product['attributes']]\n elif product.get('default_attributes'):\n options = [{attribute.get('name').lower(): [attribute.get('option')]} for attribute in product['default_attributes']]\n else:\n options = {}\n formatted['options'] = options\n return formatted", "def __repr__(self):\n\n return \"<Product: {}>\".format(self.name)", "def get_products_dict(products):\n # lang = get_language()[:2]\n lang = ''\n products_dict = {}\n try:\n if products and products[0].get('source') == 'greedy':\n for product in products:\n key = product['name']\n products_dict[key] = products_dict.get(key, {})\n products_dict[key].setdefault('products', []).append(key)\n products_dict[key]['price'] = products_dict[key].get('price', 0) + product['net_price']\n else:\n product_objs = list(Product.objects.using('slave').in_bulk([p['product_id'] for p in products]).values())\n bundled_products = []\n for product in product_objs:\n for bundled_product in product.bundled.all():\n bundled_product.price = 0\n bundled_products.append(bundled_product)\n product_objs.extend(bundled_products)\n for product in product_objs:\n key = getattr(product.parent, 'name_%s' % lang)\n products_dict[key] = products_dict.get(key, {\n 'expire_in': product.expire_in,\n 'never_expire': product.never_expire\n })\n products_dict[key].setdefault('products', []).append(mark_safe(product.name))\n products_dict[key]['price'] = products_dict[key].get('price', 0) + product.price\n # Convert it to a format which is easy to handle in email templates\n products_dict = [{\n 'title': key,\n 'body': value,\n } for key, value in products_dict.items()]\n except (ValueError, KeyError, AttributeError):\n products_dict = list({'title': p['name'], 'body': {'expire_in': None, 'never_expire': None}} for p in products)\n\n return products_dict", "def to_dict(self):\n return dict(name=self.name, product=str(self.product),\n source_code=self.source_code)", "def __repr__(self):\n return str(dict(self))", "def adapter_for_terminal(products: list):\n\n for product in products:\n product['categories'] = ', '.join(product.get('categories', ()))\n product['brands_tags'] = ', '.join(product.get('brands_tags', ()))\n product['ingredients'] = ', '.join(\n ingredient['text'] for ingredient in\n product.get('ingredients', ()))\n product['stores_tags'] = ', '.join(product.get('stores_tags', ()))", "def to_dict(self):\r\n\r\n return {\r\n 'product_id': self.product_id,\r\n 'product_name': self.product_name\r\n }", "def get_dict_repr(self):\n return { 'id': self.invoice_id,\n self.json_id: self.items }", "def __repr__(self, *args, **kwargs):\n result ='{'\n for (k, v) in self.items(*args, **kwargs):\n result += repr(k) + \": \" + repr(v) + \",\"\n\n result = result[:-1] + '}'\n return result", "def get_dict_repr(self):\n return { self.json_id:\n super(DynamicObjectWithJSONId, self).get_dict_repr() }", "def __repr__(self):\r\n return {'name':self.name, 
'weight':self.organ_weight_grams, 'vital organ': self.vital_organ, 'organ system': self.organ_system, 'brain volume': self.brain_volume}", "def __str__(self):\n try:\n delim = ', ' if len(self) < 8 else ',\\n '\n s = delim.join('%s: %s' % (repr(k), repr(self[k])) for k in self.peys())\n return '{' + s + '}'\n except Exception:\n return dict.__repr__(self)", "def serialize(self):\n return {\n 'name': self.name,\n 'description': self.description,\n 'id': self.id,\n 'price': self.price,\n }", "def get_dict_repr(self):\n return self.__dict__", "def __repr__(self) -> str:\n return str(self.as_dict())", "def serialize(self):\n return {\n 'name': self.name,\n 'description': self.description,\n 'id': self.id,\n 'price': self.price,\n 'catch_phrase': self.catch_phrase,\n }", "def __repr__(self) -> dict:\n return str({'label': self.label, 'freq': self.freq, 'left': self.left, 'right': self.right.code})", "def serialize(self):\n return {\n 'id': self.id,\n 'name': self.name,\n 'description': self.description,\n 'catalog': self.catalog.serialize,\n }", "def __repr__(self):\r\n return {'name':self.name, 'weight':self.organ_weight_grams, 'vital organ': self.vital_organ, 'organ system': self.organ_system}", "def __repr__(self):\n return dict_to_table(self)", "def _ks_prepare_odoo_product_tag_data(self, record):\n data = {\n \"name\": record.ks_name,\n \"slug\": record.ks_slug or '',\n \"description\": record.ks_description or ''\n }\n return data", "def __repr__(self):\n return repr(dict([(k, v) for k, v in self.iteritems()]))", "def __repr__(self):\n dictionary = {\"Question Text\": self.text, \"Answers\": \" \".join(self.answers), \\\n \"Referers\": \" \".join(self.referers), \"Named entities\": \" \".join(self.named_entities) }\n return repr(dictionary)", "def make_dict(self):\n return self.generate_widgets()", "def serialize(self):\n return {'id':self.id,\n 'flavor':self.flavor,\n 'size':self.size,\n 'rating':self.rating,\n 'image':self.image}", "def __str__(self):\n return _(\n \"product (name: %(name)s, quantity: %(quantity)d, \"\n \"unit price: %(unit_price)d, category: %(category)s, code: %(code)s)\") \\\n % {\n 'name': self.name,\n 'quantity': self.quantity,\n 'unit_price': self.unit_price,\n 'category': self.category.name,\n 'code': self.code}", "def __repr__(self):\n res = \"{\"\n for k in self.keys():\n res+=\" '\"+str(k)+\"':\"+str(self[k])+\",\"\n res=res[:-1]+\" }\"\n return res", "def __repr__(self):\r\n return {'name':self.name, 'weight':self.organ_weight_grams, 'vital organ': self.vital_organ, 'organ system': self.organ_system, 'heart thickness': self.heart_thickness_cm, 'heart breadth': self.heart_breadth_cm, \"heart length\": self.heart_length_cm}", "def __repr__(self): # pragma: no cover\r\n class_name = type(self).__name__\r\n attributes = \", \".join([f\"{k!r}={v!r}\" for k, v in self])\r\n\r\n return f\"<{class_name}({attributes})>\"", "def __repr__(self):\n return f'<RestaurantProduct restaurant: {self.restaurant_id} product: {self.product_id}>'" ]
[ "0.64752793", "0.605072", "0.60264534", "0.6015541", "0.5958979", "0.5930521", "0.59252644", "0.591506", "0.5822085", "0.5803746", "0.58024323", "0.5743002", "0.57425123", "0.57264364", "0.57172954", "0.5709384", "0.5706299", "0.5706014", "0.5695772", "0.5667667", "0.5665223", "0.56648654", "0.5645884", "0.5641796", "0.5627145", "0.5622081", "0.5611142", "0.5607654", "0.558104", "0.55808574" ]
0.63571995
1
Calculates the number of frames o file
def __calculate_number_of_frames(self): # Save current position current_pos = self.__file_object.tell() # Go to start of first frame self.__file_object.seek(self.__first_frame_raw_data_position) self.number_of_frames = 0 while True: if not self.__file_object.read(self.__frame_raw_data_size): break self.__file_object.readline() self.number_of_frames += 1 # Restore file pointer self.__file_object.seek(current_pos) print('Number of frames:', self.number_of_frames)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getFileCount(self) -> int:\n ...", "def fileCount(self):\n pass", "def num_frames(length, fsize, fshift):\n pad = (fsize - fshift)\n if length % fshift == 0:\n M = (length + pad * 2 - fsize) // fshift + 1\n else:\n M = (length + pad * 2 - fsize) // fshift + 2\n return M", "def get_num_frames(self):\n return self._frames.shape[0]", "def get_total_frames(self) -> int:\n return self.num_frames", "def num_frames(self):\n return self._first_rgb.shape[1]", "def size(self):\n if self.frames is None:\n return 0\n return self.frames.size", "def __len__(self):\n return self._num_samples_per_file * len(self._files) // self._world_size", "def get_num_frames(filename, ext='*.jpg'):\n if os.path.isdir(filename):\n return len(glob.glob(os.path.join(filename, ext)))\n elif os.path.isfile(filename):\n cmd = ('ffprobe -v 0 -count_frames -select_streams v:0 '\n '-show_entries stream=nb_read_frames -of '\n 'default=nokey=1:noprint_wrappers=1 ' + filename).split()\n pid = subprocess.run(cmd, stdout=subprocess.PIPE,\n universal_newlines=True)\n if pid.returncode != 0:\n return None\n nframes_expr = pid.stdout\n nframes = int(nframes_expr.rstrip())\n return nframes\n else:\n raise ValueError('Unexpect filename: {}'.format(filename))", "def num_frames(self):\n return len(self.video)", "def count_frames():\n frames = sentence.sem.frames.find_all('frame', {'name' : NEGATION_FRAME_NAME})\n frame_count = []\n for f_r in frames:\n frame_count.append(f_r)\n return len(frame_count)", "def realFrameNumber(self, callback=None):\n count = 0\n theoreticalFrameNumber = int(self.video.get(cv2.CAP_PROP_FRAME_COUNT))\n if theoreticalFrameNumber > 30000:\n return theoreticalFrameNumber\n while(True):\n # Capture frame-by-frame\n ret, frame = self.video.read()\n if not ret:\n break\n if callback != None:\n callback(0.1 + (count / theoreticalFrameNumber) * 0.75, \"Calculating the number of frame\")\n count += 1\n return count", "def bpCount(file):\n amount_bp = len(file)\n return amount_bp", "def count_len(self):\n total = 0\n for filename in self.filenames:\n f = open(os.path.join(self.directory, filename))\n line_count = 0\n for _ in f:\n line_count += 1\n if line_count < self.window_size:\n continue\n else:\n total += line_count - self.window_size + 1\n return total", "def count_frames(f):\n def counted(n):\n counted.open_count += 1\n counted.max_count = max(counted.max_count, counted.open_count)\n result = f(n)\n counted.open_count -= 1\n return result\n counted.open_count = 0\n counted.max_count = 0\n return counted", "def number_frames(signal_len, frame_len, frame_step):\n frames = 1\n if signal_len > frame_len:\n temp = (1.0 * signal_len - frame_len)/frame_step\n frames += int(np.floor(temp))\n\n return frames", "def lws_num_frames(length, fsize, fshift):\n pad = (fsize - fshift)\n if length % fshift == 0:\n M = (length + pad * 2 - fsize) // fshift + 1\n else:\n M = (length + pad * 2 - fsize) // fshift + 2\n return M", "def frames(self):\n frame_count = 0\n if self.is_video() or self.is_audio():\n if self.__dict__['nb_frames']:\n try:\n frame_count = int(self.__dict__['nb_frames'])\n except ValueError:\n raise FFProbeError('None integer frame count')\n return frame_count", "def countSamples(filename):\n with open(filename, \"r\") as f:\n line = f.readline().split(\"\\t\")\n return len(line) - 2", "def numberFiles(self):\n return self.n", "def numberFiles(self):\n with open(self.inputfile) as fin:\n for n, _ in enumerate(fin, start=1): pass\n self.n = n\n return self.n", "def __len__(self):\n return 
int(np.ceil(self.total_frame_count / self.batch_size))", "def FrameCount(self):\r\n\t\treturn self._get_attribute('frameCount')", "def countDataSize(self,filename):\n \n d = h5py.File(filename,'r')\n features = d['spectrometer/features'][:]\n select = self.selectData(features.astype(float), self.ifeature, d)\n N = len(features[select])\n d.close()\n\n N = (N//self.offsetLen) * self.offsetLen\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def bframes_count(**kwargs) -> int:\n path_project = kwargs['project_name']\n project_name = path_project.split( '/' )[-1].strip( '.' )\n if project_name in frames_count:\n return frames_count[project_name]['count']\n else:\n bpy.ops.wm.open_mainfile( filepath=path_project )\n count_frames = bpy.context.scene.frame_end\n frames_count[project_name] = {'project_name': project_name, 'count': count_frames}\n return count_frames", "def n_total_files(self):\n return len(self.fileinfo)", "def countDataSize(self,filename):\n \n try:\n d = h5py.File(filename,'r')\n except:\n print(filename)\n return \n\n N = 0\n scan_edges = d['level2/Statistics/scan_edges'][:]\n for (start,end) in scan_edges:\n N += (end-start)//self.offsetLen * self.offsetLen\n d.close()\n\n N = N*self.Nfeeds\n\n self.chunks += [[int(self.Nsamples), int(self.Nsamples+N)]]\n self.datasizes += [int(N/self.Nfeeds)]\n self.Nsamples += int(N)", "def getFrameNumber(fileName, jointNumber):\n with open(fileName) as f:\n for i, l in enumerate(f):\n pass\n return (i+1)//jointNumber", "def count_timepoints(sc, session, files):\n tuples = zip(range(len(files)), files)\n files_sc = sc.parallelize(tuples)\n\n def count_planes(kv):\n index, path2 = kv\n try:\n from ScanImageTiffReader import ScanImageTiffReader\n img = ScanImageTiffReader(path2).data()\n except Exception:\n import tifffile\n img = tifffile.imread(path2)\n return img.shape[0]\n\n data2 = files_sc.map(count_planes).collect()\n frame_numbers = np.array(data2)\n vol_numbers = frame_numbers / len(session.fieldMask)\n return vol_numbers.astype(int)", "def bspb_frameCounter():\n curTime = int(pm.currentTime())\n maxTime = int(pm.playbackOptions(q=True, maxTime=True))\n return '{0} / {1}'.format(curTime, maxTime)" ]
[ "0.74542373", "0.7406315", "0.7370555", "0.72805226", "0.7188262", "0.7104647", "0.7006017", "0.69780296", "0.6961714", "0.69169277", "0.6900665", "0.68425745", "0.68258554", "0.6812102", "0.678976", "0.6787598", "0.67869854", "0.6778575", "0.6771477", "0.67232835", "0.6706704", "0.6684075", "0.66721106", "0.6627526", "0.6616906", "0.6587772", "0.658508", "0.65677994", "0.6566209", "0.6550547" ]
0.81756157
0
Interprets the header of the YUV file
def __read_header(self):
    header = self.__file_object.readline()
    header_string = header.decode('utf-8')
    print(header_string)

    # Ignore first letter
    self.frame_width = int(re.findall('W\d+', header_string)[0][1:])
    self.frame_height = int(re.findall('H\d+', header_string)[0][1:])
    self.frame_rate = re.findall('F\d+\:\d+', header_string)[0][1:]

    # Calculate actual frame rate given the value is a ratio
    tokens = [int(d.replace(' ', '')) for d in self.frame_rate.split(':')]
    self.frame_rate = round(tokens[0] / tokens[1], 1)

    self.__pixel_aspect_ratio = re.findall('A\d+\:\d+', header_string)[0][1:]

    # Calculate actual pixel aspect ratio rate given the value is a ratio
    tokens = [int(d.replace(' ', '')) for d in self.__pixel_aspect_ratio.split(':')]
    self.__pixel_aspect_ratio = round(tokens[0] / tokens[1], 1)

    # Don't ignore for interlacing
    self.__interlacing_mode = re.findall('I(p|t|b|m)', header_string)[0]

    # Ignore first 'FRAME\n' terminator so the file object points to the first byte of raw data of the first frame
    self.__file_object.readline()
    self.__first_frame_raw_data_position = self.__file_object.tell()

    self.determine_color_space_by_frame_size()

    # Restore
    self.__file_object.seek(self.__first_frame_raw_data_position)

    return header

    # Color space parameter is missing?
    print('FourCC:\t\t', header_string[:4])
    print('Input file:\t', self.__input_file_path)
    print('Frame size:\t', f'{self.frame_width}x{self.frame_height}')
    print('Frame rate:\t', f'{self.frame_rate} FPS')
    print('Aspect Ratio:\t', self.__pixel_aspect_ratio)
    print('Color space\t', self.color_space)
    print('Frame size (raw data):', self.__frame_raw_data_size)
    print('Position of first raw:', self.__first_frame_raw_data_position)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _decode_header(self):\n #header = self.file_content[0:6]\n log_screen_descr = self.file_content[6:13]\n self.canvas_width = log_screen_descr[0] + (log_screen_descr[1]<<8)\n self.canvas_height = log_screen_descr[2] + (log_screen_descr[3]<<8)\n # is there a global color table? (usually yes)\n flags = log_screen_descr[4]\n self.glob_col_table = (flags & 0b10000000) != 0\n\n # determine the number of bits per primary color value\n self.color_resolution = (flags & 0b01110000) >> 4\n self.bits_per_pixel = self.color_resolution + 1\n\n # If the value is 1, then the colors in the global color table are sorted\n # in order of \"decreasing importance,\" which typically means \"decreasing\n # frequency\" in the image\n self.sort_flag = (flags & 0b00001000) != 0\n\n # If this value is N, then the actual table size is 2^(N+1).\n self.glob_col_table_sz = 1 << ((flags & 0b00000111)+1)\n\n self.bg_color_index = log_screen_descr[5]\n self.pix_asp_ratio = log_screen_descr[6]", "def parse_header(self):\n self._get_decompressor()\n whs = jpeg.ffi.new(\"int[]\", 3)\n whs_base = int(jpeg.ffi.cast(\"size_t\", whs))\n whs_itemsize = int(jpeg.ffi.sizeof(\"int\"))\n n = self.lib_.tjDecompressHeader2(\n self.decompressor.handle_,\n jpeg.ffi.cast(\"unsigned char*\",\n self.source.__array_interface__[\"data\"][0]),\n self.source.nbytes,\n jpeg.ffi.cast(\"int*\", whs_base),\n jpeg.ffi.cast(\"int*\", whs_base + whs_itemsize),\n jpeg.ffi.cast(\"int*\", whs_base + whs_itemsize + whs_itemsize))\n if n:\n raise JPEGRuntimeError(\"tjDecompressHeader2() failed with error \"\n \"%d and error string %s\" %\n (n, self.get_last_error()), n)\n self.width = int(whs[0])\n self.height = int(whs[1])\n self.subsampling = int(whs[2])", "def parseY4MHeader(y4m):\n w = 0; h = 0; fps_num = 0; fps_denom = 0; fr = 0; fmt = \"420\"; bit_depth = 8;\n #print(\"parsing \" + y4m)\n with open(y4m, 'rb') as f:\n line = f.readline().decode('utf-8')\n #YUV4MPEG2 W4096 H2160 F30000:1001 Ip A0:0 C420p10 XYSCSS=420P10\n m = re.search(r\"W([0-9]+) H([0-9]+) F([0-9]+)\\:([0-9]+)\", line)\n if m:\n w = int(m.group(1))\n h = int(m.group(2))\n fps_num = float(m.group(3))\n fps_denom = float(m.group(4))\n fps = round(fps_num / fps_denom)\n m = re.search(r\"C([0-9]+)p([0-9]+)\", line)\n if m:\n fmt = m.group(1)\n bit_depth = int(m.group(2))\n if w == 0 or h == 0 or fps == 0:\n print(\"Failed to parse the input y4m file!\\n\")\n sys.exit()\n return (w, h, fps_num, fps_denom, fps, fmt, bit_depth)", "def _decode_header(self, buf):\n ord_data = self._decode_vint(buf)\n f_type = ord_data & 7\n f_id = ord_data >> 3\n return f_type, f_id", "def header(filename):\n\n if not os.path.isfile(filename):\n filename = glia.match_filename(filename,'voltages')\n try:\n print('header length: ', glia.get_header(filename)[1])\n except:\n raise(ValueError, \"Could not get header, are you sure it's a MCD binary export?\")", "def header(filename):\n\n if not os.path.isfile(filename):\n filename = match_filename(filename,'voltages')\n try:\n print('header length: ', glia.get_header(filename)[1])\n except:\n raise(ValueError, \"Could not get header, are you sure it's a MCD binary export?\")", "def _read_header(self):\n f = self._open(self.filename, 'rb')\n idx = 0\n header = b''\n # reading the header \n while idx < 13: \n header += f.readline().rstrip() # removes the \"\\n\\r\" at the end\n idx += 1\n # \"magically\" compute the data offset\n try:\n self._offset_auto = ord(header[2]) + 1856\n except:\n self._offset_auto = header[2] + 1856\n\n\n\n header = 
header[:self._offset_auto+300] # add an extra random header for offset\n header = re.sub(r'(?P<section>\\[[^\\]]+\\])', '\\n\\g<section>', header.decode('latin1'))\n header = header.splitlines()[1:]\n self.header = dict([self._header_sect2dict(line) for line in header])\n self.shape = np.array(self.header['Acquisition']['areGRBScan'].split(',')[-2:]).astype(np.int)\n f.close()\n\n offset_list = {'auto': self._offset_auto,\n 'from_end': -np.prod(self.shape)*self._nbytes,\n 'from_end_4k': - np.prod(self.shape)*self._nbytes - 4092}\n\n if self._offset_input in offset_list:\n\n self._offset_data = offset_list[self._offset_input]\n if self._offset_input.startswith('from_end'):\n # set the flag to seek from the end of the file.\n self._offset_whence = 2\n elif type(self._offset_input) is int:\n self._offset_data = self._offset_input\n else:\n raise ValueError\n\n \n\n return self.header", "def parse_header(self):", "def _parseHeader(self):\n # Big or little endian for the header.\n self._getEndianess()\n # Read the fixed header.\n self._readFixedHeader()\n # Get the present blockettes.\n self._getBlockettes()\n # Calculate the starttime.\n self._calculateStarttime()", "def _read_old_header(self, raw):\n\n byte_count = 0\n\n data_size = 4\n self.label = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.version = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.revision = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 26\n self.date = struct.unpack('<26s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.file_format = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.file_type = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.original_file_name = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.reference_file_name = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.related_file_name_a = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.related_file_name_b = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.related_file_name_c = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.related_file_name_d = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 82\n self.annotate = struct.unpack('<82s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 33\n self.instrument_model = struct.unpack('<33s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 33\n self.instrument_serial_number = struct.unpack('<33s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 33\n self.software_version_number = struct.unpack('<33s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 33\n self.crystal_material = struct.unpack('<33s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.laser_wavelength_microns 
= struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.laser_null_doubling = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.optical_ratio = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.dispersion_constant_xc = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.dispersion_constant_xm = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.dispersion_constant_xb = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.interferogram_size = struct.unpack('<H',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.interferogram_center.append(struct.unpack('<H',\n raw[byte_count:byte_count+data_size])[0])\n byte_count += data_size\n\n data_size = 2\n self.interferogram_center.append(struct.unpack('<H',\n raw[byte_count:byte_count+data_size])[0])\n byte_count += data_size\n\n data_size = 2\n self.acquire_mode = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.emissivity = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.apodization = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.zero_fill = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.run_time_math = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.fft_size = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.number_of_coadds = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.number_of_igrams = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.amb_temperature = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.inst_temperature = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.wbb_temperature = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.cbb_temperature = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 20\n self.spare_i = struct.unpack('<hhhhhhhhhh',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 40\n self.spare_f = struct.unpack('<ffffffffff',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 40\n self.spare_l = struct.unpack('<ffffffffff',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 65\n self.spare_na = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.spare_nb = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.spare_nc = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n 
self.spare_nd = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.spare_ne = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.header_end = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size", "def GetHeader(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMF2_GetHeader(self)", "def readframeheader(self):\n numbytes = self.readdword()\n magic = self.readword()\n assert magic == 0xF1FA\n oldchunks = self.readword()\n frameduration = self.readword()\n _ = self.readbytearr(2)\n newchunks = self.readdword()\n numchunks = oldchunks\n if oldchunks == 0xFFFF and newchunks != 0:\n numchunks = newchunks\n return {\n \"framebytes\": numbytes,\n \"frameduration\": frameduration,\n \"numchunks\": numchunks,\n }", "def _read_new_header(self, raw):\n\n byte_count = 0\n\n data_size = 4\n self.label = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.version = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.revision = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 28\n self.date = struct.unpack('<28s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.file_format = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.file_type = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.original_file_name = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.reference_file_name = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.related_file_name_a = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.related_file_name_b = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.related_file_name_c = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 84\n self.annotate = struct.unpack('<84s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 36\n self.instrument_model = struct.unpack('<36s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 36\n self.instrument_serial_number = struct.unpack('<36s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 36\n self.software_version_number = struct.unpack('<36s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 36\n self.crystal_material = struct.unpack('<36s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.laser_wavelength_microns = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.laser_null_doubling = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.padding = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.dispersion_constant_xc = 
struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.dispersion_constant_xm = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.dispersion_constant_xb = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.num_chan = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.interferogram_size = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.scan_direction = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.acquire_mode = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.emissivity = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.apodization = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.zero_fill = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.run_time_math = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.fft_size = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.number_of_coadds = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.single_sided = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.chan_display = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.amb_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.inst_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.wbb_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.cbb_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.temperature_dwr = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.emissivity_dwr = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.laser_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 40\n self.spare_i = struct.unpack('<llllllllll',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 80\n self.spare_f = struct.unpack('<dddddddddd',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 68\n self.spare_na = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.spare_nb = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.spare_nc = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.spare_nd = struct.unpack('<68s',\n 
raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.spare_ne = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.header_end = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size", "def _read_pnm_header(self, data):\r\n bpm = data[1:2] in b\"14\"\r\n regroups = re.search(b\"\".join((\r\n b\"(^(P[123456]|P7 332)\\s+(?:#.*[\\r\\n])*\",\r\n b\"\\s*(\\d+)\\s+(?:#.*[\\r\\n])*\",\r\n b\"\\s*(\\d+)\\s+(?:#.*[\\r\\n])*\" * (not bpm),\r\n b\"\\s*(\\d+)\\s(?:\\s*#.*[\\r\\n]\\s)*)\")), data).groups() + (1, ) * bpm\r\n self.header = regroups[0]\r\n self.magicnum = regroups[1]\r\n self.width = int(regroups[2])\r\n self.height = int(regroups[3])\r\n self.maxval = int(regroups[4])\r\n self.depth = 3 if self.magicnum in b\"P3P6P7 332\" else 1\r\n self.tupltypes = [self._types[self.magicnum]]", "def read_header(infile):\n h = dict()\n fid = open(infile, 'r+b')\n h['filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['parent_filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['comments1'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['comments2'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['energy_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['config_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['file_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['trans_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['date_modified'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 16))\n h['frequency'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['mat_velocity'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_polarization_channels'] =np.fromfile(fid, dtype = np.int16,count = 1)\n h['spare00'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['adc_min_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_max_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['band_width'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare01'] = np.fromfile(fid, dtype = np.int16, count = 5)\n h['polarization_type'] = np.fromfile(fid, dtype = np.int16, count = 4)\n h['record_header_size'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_precision'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['min_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['max_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['avg_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_scale_factor'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_units'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['surf_removal'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['edge_weighting'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['x_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['y_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['z_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['t_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['spare02'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n 
h['y_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['scan_orientation'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_direction'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_storage_order'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_x_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_y_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_z_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_t_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['x_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['date_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['time_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['depth_recon'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['elevation_offset_angle'] = np.fromfile(fid,dtype = np.float32, count = 1)\n h['roll_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['azimuth_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['spare06'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_radius'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_delay'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_start'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_end'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['ahis_software_version'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare_end'] = np.fromfile(fid, dtype = np.float32, count = 10)\n return h", "def _first_IHDR(self, data):\n w, h, d, ctype, comp, filt, ilace = struct.unpack(\">2L5B\", data)\n self.width = w\n self.height = h", "def GetHeader(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMD2_GetHeader(self)", "def __get_next_yuv_frame(self):\n raw_frame_buffer = self.__file_object.read(self.__frame_raw_data_size)\n\n # Ignore FRAME 
header\n self.__file_object.readline()\n return raw_frame_buffer", "def _readHeader(self):\n self.ControllerVersion = self._readInt(0)\n self.LogicOutput = self._readInt(2)\n self.AppHiCapLowNoise = self._readInt(4)\n self.TimingMode = self._readInt(8)\n self.Exposure = self._readFloat(10)\n self.DetTemperature = self._readFloat(36)\n self.DetectorType = self._readInt(40)\n self.TriggerDiode = self._readInt(44)\n self.DelayTime = self._readFloat(46)\n self.ShutterControl = self._readInt(50)\n self.AbsorbLive = self._readInt(52)\n self.AbsorbMode = self._readInt(54)\n self.CanDoVirtualChip = self._readInt(56)\n self.ThresholdMinLive = self._readInt(58)\n self.ThresholdMin = self._readFloat(60)\n self.ThresholdMaxLive = self._readInt(64)\n self.ThresholdMax = self._readFloat(66)\n self.ADCOffset = self._readInt(188)\n self.ADCRate = self._readInt(190)\n self.ADCType = self._readInt(192)\n self.ADCRes = self._readInt(194)\n self.ADCBitAdj = self._readInt(196)\n self.Gain = self._readInt(198)\n self.GeometricOps = self._readInt(600)", "def GetHeader(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMF3_GetHeader(self)", "def getheader(filename):\n # read header and convert to string\n h = np.fromfile(filename, dtype='uint8', count=512)\n header = ''\n for s in h[h > 0]:\n header += chr(s)\n # start reading at 'datatype'\n hd = header[header.lower().find('datatype'):]\n hd = hd.split(':')[0].replace(',', ' ').split()\n # Types: uint8 int16 int32 float32\n typelist = ['u1', 'i2', 'i4', 'f4']\n # extract datatype\n try:\n dtype = typelist[int(hd[0].split('=')[1]) - 1]\n except:\n print(header)\n raise IOError('getheader: datatype invalid or missing')\n # extract endianness\n try:\n if hd[-1].split('=')[0].lower() != 'endian':\n raise IndexError()\n endian = hd[-1].split('=')[1]\n except IndexError:\n print(header)\n raise IOError('getheader: endianess missing.')\n if endian.lower() == 'l':\n dtype = '<' + dtype\n else:\n dtype = '>' + dtype\n # extract dims\n try:\n if hd[2].split('=')[0].lower() != 'dims':\n raise IndexError()\n dims = int(hd[2].split('=')[1])\n if dims not in [2, 3]:\n raise ValueError('Invalid dims=%i (must be 2 or 3)' % dims)\n except IndexError:\n print(header)\n raise IOError('getheader: dims invalid or missing.')\n try:\n if hd[3].split('=')[0].lower() != 'nx':\n raise IndexError()\n nx = int(hd[3].split('=')[1])\n except:\n print(header)\n raise IOError('getheader: nx invalid or missing.')\n try:\n if hd[4].split('=')[0].lower() != 'ny':\n raise IndexError()\n ny = int(hd[4].split('=')[1])\n except:\n print(header)\n raise IOError('getheader: ny invalid or missing.')\n if dims == 3:\n try:\n if hd[5].split('=')[0].lower() != 'nt':\n raise IndexError()\n nt = int(hd[5].split('=')[1])\n except:\n print(header)\n raise IOError('getheader: nt invalid or missing.')\n shape = (nx, ny, nt)\n else:\n shape = (nx, ny)\n return [shape, dtype, header]", "def parseheader(self):\n for line in self.rawheader.split(\"\\n\"):\n pat = \"QUITTING\"\n if pat in line:\n self.prefix = line\n continue\n\n pat = \"VERSION NUMBER\"\n if pat in line:\n self.softvers = line[28:].strip()\n continue\n\n pat = \"DATE/TIME IS\"\n if pat in line:\n meta = line[22:].strip()\n matchobj = dtpat.match(meta)\n if matchobj:\n try:\n self.dumpdt = datetime.strptime(meta, moddtfmt)\n except:\n self.nodump = True\n self.comment += (\n \" *** Cannot read module date/time: {}\\n\".format(meta)\n )\n continue\n\n pat = \"NUMBER RECORDS IS\"\n if pat in line:\n self.ndumprec = 
line[22:].strip()\n continue\n\n pat = \"MODULE TYPE IS\"\n if pat in line:\n self.modtype = line[22:].strip()\n continue\n\n pat = \"SERIAL NUMBER IS\"\n if pat in line:\n self.modserial = line[22:].strip()\n continue\n\n pat = \"COND S/N IS\"\n if pat in line:\n meta = line[22:].strip()\n serials = meta.split(\"/\")\n self.cellserial = serials[1]\n self.ioserial = serials[0]\n continue\n\n pat = \"SAMPLING INTERVAL IS\"\n if pat in line:\n meta = line[22:].strip()\n self.sampintv = meta\n if meta == \"00:01:00\":\n self.nodump = False\n self.comment += \" *** Sample interval is {}\\n\".format(meta)\n elif meta != \"00:02:00\":\n self.nodump = True\n self.comment += \" *** Sample interval is {}\\n\".format(meta)\n continue\n\n pat = \"AVERAGE INTERVAL IS\"\n if pat in line:\n self.avgintv = line[22:].strip()\n if int(self.avgintv) != 24:\n self.nodump = True\n self.comment += \" *** Average interval is {}\\n\".format(meta)\n continue\n\n pat = \"BATTERY VOLTAGE IS\"\n if pat in line:\n self.voltage = line[22:].strip()\n continue\n\n return self.modserial", "def _main_header(self, hdr):\n d = {}\n # Called readDefAnalysis in OpenMIMS\n d['sample type'], d['data included'], d['sample x'], d['sample y'], \\\n d['analysis type'], d['user name'], d['sample z'], date, time = \\\n unpack(self._bo + '4i 32s 16s i 12x 16s 16s', hdr.read(112))\n\n d['data included'] = bool(d['data included'])\n d['user name'] = self._cleanup_string(d['user name'])\n d['analysis type'] = self._cleanup_string(d['analysis type']).lower()\n date = self._cleanup_string(date)\n time = self._cleanup_string(time)\n d['date'] = self._cleanup_date(date + ' ' + time)\n\n if self.header['file type'] in (27, 29, 39):\n # Called MaskImage/readMaskIm in OpenMIMS\n d['original filename'], d['analysis duration'], d['frames'], \\\n d['scan type'], d['magnification'], d['size type'], \\\n d['size detector'], d['beam blanking'], d['presputtering'], \\\n d['presputtering duration'] = \\\n unpack(self._bo + '16s 3i 3h 2x 3i', hdr.read(48))\n\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = {}\n d['HVControl']['hvcontrol enabled'] = False\n\n elif self.header['file type'] in (22, 41):\n # Called MaskSampleStageImage/readMaskIss in OpenMIMS\n d['original filename'], d['analysis duration'], d['scan type'], \\\n d['steps'], d['step size x'], d['step size y'], d['step size?'], \\\n d['step waittime'], d['frames'], d['beam blanking'], \\\n d['presputtering'], d['presputtering duration'] = \\\n unpack(self._bo + '16s 6i d 4i', hdr.read(64))\n\n d['scan type'] = _stage_scan_types.get(d['scan type'], str(d['scan type']))\n\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = self._hvcontrol(hdr)\n # Don't know if this unused byte needs to go after HVControl or after SigRef.\n hdr.seek(4, 1)\n\n elif self.header['file type'] in (21, 26):\n # Not in OpenMIMS\n # this bit same as image, 1 extra unused/unknown\n d['original filename'], d['analysis duration'], d['frames'], \\\n d['scan type'], d['magnification'], d['size type'], \\\n d['size detector'], d['beam blanking'], d['presputtering'], \\\n d['presputtering duration'] = \\\n unpack(self._bo + '16s 4x 3i 3h 2x 3i', hdr.read(52))\n\n # this bit same as stage scan\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = self._hvcontrol(hdr)\n\n # 24 bytes unknown, not sure if they go here or before AutoCal\n hdr.seek(24, 1)\n\n elif self.header['file type'] == 31:\n # Don't know if this is correct, all 0s anyway\n d['original filename'], d['scan type'], \\\n d['beam blanking'], 
d['presputtering'] = \\\n unpack(self._bo + '16s 3i 4x', hdr.read(32))\n\n elif self.header['file type'] == 35:\n d['original filename'], d['scan type'], d['analysis duration'], \\\n d['frames'], d['beam blanking'], d['presputtering'] = \\\n unpack(self._bo + '16s 5i 40x', hdr.read(76))\n\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = self._hvcontrol(hdr)\n\n else:\n raise TypeError('What type of image are you? {}'.format(self.header['file type']))\n\n # Continue main header for all types\n d['SigRef'] = self._sigref(hdr)\n d['masses'] = unpack(self._bo + 'i', hdr.read(4))[0]\n\n # scan type is set for stage scan analysis, set others\n if isinstance(d['scan type'], int):\n if d['scan type'] == 0:\n d['scan type'] = ''\n else:\n d['scan type'] = str(d['scan type'])\n\n d['beam blanking'] = bool(d['beam blanking'])\n d['presputtering'] = bool(d['presputtering'])\n d['original filename'] = self._cleanup_string(d['original filename'])\n\n if self.header['file type'] in (21, 26, 27, 29, 35, 39):\n if self.header['file version'] >= 4108:\n n = 60\n else:\n n = 10\n elif self.header['file type'] in (22, 31, 40, 41):\n n = 20\n else:\n n = 0\n\n # Not sure what this is, memory pointers? Not needed.\n # d['mass table ptr'] = unpack(self._bo + 2*n*'h', hdr.read(n*4))\n hdr.seek(n*4, 1)\n\n if self.header['file type'] in (21, 22, 26, 40, 41, 35):\n hdr.seek(4, 1) # 4 bytes unused\n\n # Mass table, dict by species label.\n d['MassTable'] = collections.OrderedDict()\n for m in range(d['masses']):\n mi = {}\n mi['trolley index'], unknown, mi['mass'], mi['matrix or trace'], \\\n mi['detector'], mi['wait time'], mi['frame count time'] = \\\n unpack(self._bo + '2i d 2i 2d', hdr.read(40))\n\n if self.header['file type'] == 31:\n if d['analysis type'].endswith('trolley step scan'):\n # start and end are in mm, step is in μm; convert to mm\n mi['radius start'], mi['radius end'], \\\n mi['radius step'], mi['b field bits'] = \\\n unpack(self._bo + '3d i', hdr.read(28))\n mi['radius step'] /= 1000\n else:\n mi['voltage start'], mi['voltage end'], \\\n mi['voltage step'], mi['b field bits'] = \\\n unpack(self._bo + '3d i', hdr.read(28))\n else:\n mi['offset'], mi['b field bits'] = unpack(self._bo + '2i', hdr.read(8))\n\n mi.update(self._species(hdr))\n\n if self.header['file type'] == 31:\n hdr.seek(4, 1)\n\n # Add correction controls, my own addition.\n mi['background corrected'] = False\n mi['deadtime corrected'] = False\n mi['yield corrected'] = False\n\n label = mi.pop('label')\n # This is true for NS50L and file version 4108.\n # Anywhere else different?\n # Maybe confirm this with the Trolleys dict,\n # there is an Esi trolley.\n if mi['trolley index'] == 8:\n label = 'SE'\n\n d['MassTable'][label] = mi\n\n # Create a few convenient lists\n d['label list'] = tuple(d['MassTable'].keys())\n d['label list fmt'] = tuple(format_species(m) for m in d['label list'])\n d['mass list'] = tuple(d['MassTable'][m]['mass'] for m in d['label list'])\n\n return d", "def _parse_header(self):\n # read the first bytes from the file\n header = self._stream_handle.read(HEADER_BYTES)\n match = HEADER_MATCHER.match(header)\n if not match:\n raise SampleException(\"File header does not match the header regex\")\n\n # update the state to show we have read the header\n self._increment_state(HEADER_BYTES)", "def update_header(self) -> None:\n self.header.partial_reset()\n self.header.point_format_id = self.points.point_format.id\n self.header.point_data_record_length = self.points.point_size\n\n if len(self.points) > 0:\n 
self.header.update(self.points)\n\n if self.header.version.minor >= 4:\n if self.evlrs is not None:\n self.header.number_of_evlrs = len(self.evlrs)\n self.header.start_of_waveform_data_packet_record = 0\n # TODO\n # if len(self.vlrs.get(\"WktCoordinateSystemVlr\")) == 1:\n # self.header.global_encoding.wkt = 1\n else:\n self.header.number_of_evlrs = 0", "def header(fpath):\n # If you want to change something, instead of overwriting a bug, add a new\n # key with the desired functionallity. This way, prior code doesn't break.\n # One can be very waste full with this function as it is fast anyways.\n\n\n ret = {}\n with open(fpath) as f:\n for line in f:\n if line[0] is not \"#\":\n break\n # Strip comment marker\n line = line[2:]\n name, value = line.split(\"=\")\n # Strip newline\n ret[name] = value[:-1]\n\n # To have some compatibility between spe veronica and viktor files,\n # we further unify some of the namings\n ret['gain'] = ret.get('Gain')\n\n exp_time = ret.get('ExposureTime [s]')\n if exp_time:\n ret['exposure_time'] = datetime.timedelta(seconds=float(exp_time))\n\n hbin = ret.get('HBin')\n if hbin:\n ret['hbin'] = {'ON': True}.get(value, False)\n\n cw = ret.get('Central-Wavelength')\n if cw:\n ret['central_wl'] = float(cw)\n\n vis_wl = ret.get('vis-Wavelength')\n if vis_wl:\n ret['vis_wl'] = float(vis_wl)\n\n syringe_pos = ret.get('Syringe Pos')\n if syringe_pos:\n ret['syringe_pos'] = int(syringe_pos)\n\n cursor = ret.get(\"Cursor\")\n if cursor:\n ret['cursor'] = tuple([int(elm) for elm in cursor.split('\\t')])\n\n x_mirror = ret.get('x-mirror')\n if x_mirror:\n ret['x_mirror'] = {'ON': True}.get(x_mirror, False)\n\n calib_coeff = ret.get('calib Coeff')\n if calib_coeff:\n ret['calib Coeff'] = tuple([float(elm) for elm in calib_coeff.split('\\t')])\n # Index 0 is actually central_wl during calibration,\n ret['calib_central_wl'] = ret['calib Coeff'][0]\n\n\n # For np.poly1d the calibration coefficents need to be in decreasing\n # order and no zero values are not allowed\n _cc = np.array(ret['calib Coeff'][1:])\n ret['calib_coeff'] = _cc[np.nonzero(_cc)][::-1]\n\n scan_start_time = ret.get('Scan Start time')\n if scan_start_time:\n ret['date'] = datetime.datetime.strptime(scan_start_time, '%d.%m.%Y %H:%M:%S')\n\n scan_stop_time = ret.get('Scan Stop time')\n if scan_stop_time:\n ret['date_stop'] = datetime.datetime.strptime(scan_stop_time, '%d.%m.%Y %H:%M:%S')\n\n timedelay = ret.get('Timedelay')\n if timedelay:\n ret['timedelay'] = np.array([int(elm) for elm in timedelay.split('\\t')])\n\n timedelay_pos= ret.get('Timedelay Pos')\n if timedelay_pos:\n ret['timedel_pos'] = np.array([int(elm) for elm in timedelay_pos.split('\\t')])\n\n return ret", "def _read_binary_file_header(self, header):\n pos = 0\n for item in BINARY_FILE_HEADER_FORMAT:\n length, name, _ = item\n string = header[pos: pos + length]\n pos += length\n # Unpack according to different lengths.\n if length == 2:\n format = ('%sh' % self.endian).encode('ascii', 'strict')\n # Set the class attribute.\n setattr(self, name, unpack(format, string)[0])\n # Update: Seems to be correct. Two's complement integers seem to be\n # the common way to store integer values.\n elif length == 4:\n format = ('%si' % self.endian).encode('ascii', 'strict')\n # Set the class attribute.\n setattr(self, name, unpack(format, string)[0])\n # The other value are the unassigned values. 
As it is unclear how\n # these are formatted they will be stored as strings.\n elif name.startswith('unassigned'):\n # These are only the unassigned fields.\n format = 'h' * (length // 2)\n # Set the class attribute.\n setattr(self, name, string)\n # Should not happen.\n else:\n raise Exception", "def GetHeader(self) -> \"char const *\":\n return _itkVTKPolyDataReaderPython.itkVTKPolyDataReaderMD3_GetHeader(self)", "def _header(self, pam=False):\r\n if pam or self.magicnum == b'P7':\r\n header = \"\\n\".join((\r\n \"P7\",\r\n \"HEIGHT %i\" % self.height,\r\n \"WIDTH %i\" % self.width,\r\n \"DEPTH %i\" % self.depth,\r\n \"MAXVAL %i\" % self.maxval,\r\n \"\\n\".join(\"TUPLTYPE %s\" % unicode(i) for i in self.tupltypes),\r\n \"ENDHDR\\n\"))\r\n elif self.maxval == 1:\r\n header = \"P4 %i %i\\n\" % (self.width, self.height)\r\n elif self.depth == 1:\r\n header = \"P5 %i %i %i\\n\" % (self.width, self.height, self.maxval)\r\n else:\r\n header = \"P6 %i %i %i\\n\" % (self.width, self.height, self.maxval)\r\n if sys.version_info[0] > 2:\r\n header = bytes(header, 'ascii')\r\n return header", "def verify_header (filename, htypes=None):\n\n # dictionary\n dict_head = {\n # raw header\n # commenting out SIMPLE, BSCALE and BZERO - basic keywords\n # that will be present in images but not in binary fits tables\n #'SIMPLE': {'htype':'raw', 'dtype':bool, 'DB':False, 'None_OK':True},\n #'BSCALE': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n #'BZERO': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BITPIX': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS1': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'NAXIS2': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BUNIT': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n #'CCD-AMP': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'SET-TEMP': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'CCD-TEMP': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'XBINNING': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'YBINNING': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n #'CCD-SET': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'ALTITUDE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'AZIMUTH': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DOMEAZ': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'RADESYS': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'EPOCH': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'RA': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'RA-REF': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n #'RA-TEL': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'DEC': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'DEC-REF': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n #'DEC-TEL': {'htype':'raw', 'dtype':float, 'DB':False, 'None_OK':True},\n 'HA': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'FLIPSTAT': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'EXPTIME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'ISTRACKI': {'htype':'raw', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'ACQSTART': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'ACQEND': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPSSTART': 
{'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPSEND': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GPS-SHUT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DATE-OBS': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'MJD-OBS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'LST': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'UTC': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'TIMESYS': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'ORIGIN': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'MPC-CODE': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'TELESCOP': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'CL-BASE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-MAST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-DOME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-AIRCO': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RH-PIER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PRESSURE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-PIER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-DOME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-ROOF': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-AIRCO': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-MAST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-STRUT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRING': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-SPIDER': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-FWN': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-FWS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-M2HOLD': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-GUICAM': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-M1': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYWIN': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYGET': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-CRYCP': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PRES-CRY': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDAVE': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDGUST': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'WINDDIR': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SITELAT': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SITELONG': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ELEVATIO': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n #'WEATIME': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'FILTER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n #'FILTERID': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'CCD-ID': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'CONTROLL': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'DETSPEED': {'htype':'raw', 'dtype':int, 'DB':True, 'None_OK':True},\n 'CCD-NW': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'CCD-NH': {'htype':'raw', 'dtype':int, 'DB':False, 'None_OK':True},\n 'INSTRUME': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'FOCUSPOS': {'htype':'raw', 'dtype':int, 'DB':True, 
'None_OK':True},\n 'IMAGETYP': {'htype':'raw', 'dtype':str, 'DB':False, 'None_OK':True},\n 'OBJECT': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'AIRMASS': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':False},\n 'ORIGFILE': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':False},\n 'OBSERVER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'ABOTVER': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PROGNAME': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PROGID': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GUIDERST': {'htype':'raw', 'dtype':str, 'DB':True, 'None_OK':True},\n 'GUIDERFQ': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'TRAKTIME': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ADCX': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n 'ADCY': {'htype':'raw', 'dtype':float, 'DB':True, 'None_OK':True},\n #\n # full header\n 'BB-V': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'BB-START': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'KW-V': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'LOG': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'LOG-IMA': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'N-INFNAN': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'XTALK-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'XTALK-F': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'NONLIN-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NONLIN-F': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'GAIN-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'GAIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'GAIN1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'GAIN16': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'OS-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'BIASMEAN': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIASM1': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIASM16': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDNOISE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDN1': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RDN16': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'BIAS1A0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BIAS1A1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'VFITOK1': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'BIAS16A0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BIAS16A1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'VFITOK16': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'MBIAS-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MBIAS-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MB-NDAYS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'SATURATE': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'NOBJ-SAT': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'MFLAT-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MFLAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MF-NDAYS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'MFRING-P': {'htype':'full', 'dtype':bool, 'DB':True, 
'None_OK':False},\n 'MFRING-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'FRRATIO': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'COSMIC-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NCOSMICS': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'SAT-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'NSATS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'REDFILE': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'MASKFILE': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'S-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'S-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'S-NOBJ': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'S-FWHM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'S-FWSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'S-SEEING': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-SEESTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-ELONG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-ELOSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-BKG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-BKGSTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'S-VIGNET': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BKG-CORR': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'BKG-CHI2': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-CF1': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-CF16': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'BKG-FDEG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'BKG-FC0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'A-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'A-INDEX': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'A-PSCALE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-PSCALX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-PSCALY': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-ROT': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-ROTX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-ROTY': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'A-CAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'A-NAST': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'A-TNAST': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'A-NAMAX': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'A-DRA': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DRASTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DDEC': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'A-DDESTD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'PSF-V': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n 'PSF-RAD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-RADP': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SIZE': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-FRAC': {'htype':'full', 'dtype':float, 'DB':False, 
'None_OK':True},\n 'PSF-SAMP': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-CFGS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-NOBJ': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PSF-FIX': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'PSF-PLDG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PSF-CHI2': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-FWHM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-SEE': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PSF-PMIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PMAX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PMED': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-PSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMIN': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMAX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BMED': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-BSTD': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMNM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMXM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMDM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-ESTM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMNM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMXM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMDM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FSTM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMNG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMXG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-EMDG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-ESTG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMNG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMXG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FMDG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PSF-FSTG': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-P': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'PC-CAT-F': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':True},\n 'PC-NCAL': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'PC-TNCAL': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-FNCAL': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NCMAX': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NCMIN': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-ZPFDG': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-ZPF0': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-TNSUB': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-NSUB': {'htype':'full', 'dtype':int, 'DB':False, 'None_OK':True},\n 'PC-MZPD': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-MZPS': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-ZPDEF': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'PC-ZP': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-ZPSTD': {'htype':'full', 'dtype':float, 'DB':True, 
'None_OK':True},\n 'PC-EXTCO': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'AIRMASSC': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'RA-CNTR': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'DEC-CNTR': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'PC-AIRM': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'NSIGMA': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'LIMEFLUX': {'htype':'full', 'dtype':float, 'DB':False, 'None_OK':True},\n 'LIMMAG': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'NOBJECTS': {'htype':'full', 'dtype':int, 'DB':True, 'None_OK':True},\n 'RADECOFF': {'htype':'full', 'dtype':float, 'DB':True, 'None_OK':True},\n 'FORMAT-P': {'htype':'full', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'DUMCAT': {'htype':'full', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'QC-FLAG': {'htype':'full', 'dtype':str, 'DB':True, 'None_OK':False},\n 'DATEFILE': {'htype':'full', 'dtype':str, 'DB':False, 'None_OK':True},\n #\n # transient header\n 'SWARP-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'SWARP-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-REF': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-DXYLOC': {'htype':'trans', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'Z-DX': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DY': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DXSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-DYSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FNRLOC': {'htype':'trans', 'dtype':bool, 'DB':False, 'None_OK':True},\n 'Z-FNR': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FNRSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'Z-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'Z-SIZE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'Z-BSIZE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'Z-SCMED': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-SCSTD': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'Z-FPEMED': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'Z-FPESTD': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'T-NSIGMA': {'htype':'trans', 'dtype':int, 'DB':True, 'None_OK':True},\n 'T-LFLUX': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'T-NTRANS': {'htype':'trans', 'dtype':int, 'DB':True, 'None_OK':True},\n 'T-FTRANS': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-LMAG': {'htype':'trans', 'dtype':float, 'DB':True, 'None_OK':True},\n 'T-NFAKE': {'htype':'trans', 'dtype':int, 'DB':False, 'None_OK':True},\n 'T-FAKESN': {'htype':'trans', 'dtype':float, 'DB':False, 'None_OK':True},\n 'MC-P': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'MC-V': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'MC-MODEL': {'htype':'trans', 'dtype':str, 'DB':False, 'None_OK':True},\n 'TDUMCAT': {'htype':'trans', 'dtype':bool, 'DB':True, 'None_OK':False},\n 'TQC-FLAG': {'htype':'trans', 'dtype':str, 'DB':True, 'None_OK':False},\n }\n\n # read header of filename\n if isfile (filename):\n header = read_hdulist (filename, get_data=False, get_header=True)\n else:\n # return success=False if it does not exist\n log.warning ('file {} does not exist; 
not able to verify its header'\n .format(filename))\n return False\n\n\n # force [htypes] to be a list\n htypes_list = list(htypes)\n\n # loop keys in dict_head\n for key in dict_head.keys():\n\n # only check keywords with htype matching the input [htypes]\n if dict_head[key]['htype'] not in htypes_list:\n continue\n\n # check that key is present in header\n if key in header:\n\n # provide warning if dtype not as expected and header\n # keyword value is not 'None'\n if (dict_head[key]['dtype'] != type(header[key]) and\n header[key] != 'None'):\n log.warning ('dtype of keyword {}: {} does not match the '\n 'expected dtype: {} in header of {}'\n .format(key, type(header[key]),\n dict_head[key]['dtype'], filename))\n\n # if key goes to DataBase and value is 'None' or None\n # while 'None_OK' is False, raise an exception\n if (dict_head[key]['DB'] and not dict_head[key]['None_OK'] and\n (header[key] is None or header[key] == 'None')):\n msg = ('DataBase keyword {} not allowed to have \\'None\\' or '\n 'None value in header of {}'.format(key, filename))\n log.error (msg)\n raise ValueError (msg)\n\n\n else:\n msg = 'keyword {} not present in header of {}'.format(key, filename)\n # if keyword will be ingested into the database, raise an exception\n if dict_head[key]['DB']:\n log.error (msg)\n raise KeyError (msg)\n\n else:\n log.warning (msg)\n\n\n return" ]
[ "0.7273211", "0.6998706", "0.6676672", "0.66659373", "0.66317815", "0.6631482", "0.65219945", "0.65159136", "0.64302945", "0.6422038", "0.6355497", "0.6351281", "0.6338665", "0.63145584", "0.6309015", "0.6292449", "0.629189", "0.62856424", "0.62795275", "0.62275565", "0.61913735", "0.61688286", "0.6166101", "0.6143341", "0.6136294", "0.61207396", "0.6114243", "0.60956967", "0.60891855", "0.6071703" ]
0.74078685
0
Returns a buffer containing the next frame in the file
def __get_next_yuv_frame(self):
    raw_frame_buffer = self.__file_object.read(self.__frame_raw_data_size)

    # Ignore FRAME header
    self.__file_object.readline()
    return raw_frame_buffer
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_next_frame(self):\n try:\n return self.framegen.next()\n except StopIteration:\n return None", "def __readNext(self, f) -> bytes:\n try:\n fBuffer = f.read(Rudp.Packet.payloadMax)\n except Exception as e:\n print(\"Exception when reading file \", f, \". Because:\", format(e))\n return fBuffer", "def next_frame(self, save_index=True):\n if len(self._frames) > self._findex + 1:\n self._findex += 1\n frame_start = self._findex * self._flen\n if not save_index:\n self._index = frame_start\n else:\n if self._index + self._flen <= len(self) - 1 and save_index:\n self._index += self._flen\n else:\n self._index = frame_start + len(self.frame) - 1\n return self._frames[self._findex]\n return None", "def read(self):\r\n frame = self.last_frame\r\n return frame", "def next(self):\n if self.currentframe < (self.nframes - 1) and self.nframes > 1:\n return self.getframe(self.currentframe + 1)\n else:\n newobj = hdf5image()\n newobj.read(next_filename(self.filename))\n return newobj", "def next(self):\n if self.currentframe < (self.nframes - 1) and self.nframes > 1:\n return self.getframe(self.currentframe + 1)\n else:\n newobj = pixiimage()\n newobj.read(next_filename(\n self.sequencefilename))\n return newobj", "def next_batch(self):\n\n while self.cap.isOpened():\n flag, frame = self.cap.read()\n yield frame", "async def _retrieve_frame(self, mode: BufferRetrieveMode) -> RawArray:", "def read_frame(self):\n _temp = self._read_unpack(4, lilendian=True)\n if _temp is None: raise EOFError\n\n _time = datetime.datetime.fromtimestamp(_temp)\n _tsss = _temp\n _tsus = self._read_unpack(4, lilendian=True)\n _ilen = self._read_unpack(4, lilendian=True)\n _olen = self._read_unpack(4, lilendian=True)\n\n frame = dict(\n frame_info = dict(\n ts_sec = _tsss,\n ts_usec = _tsus,\n incl_len = _ilen,\n orig_len = _olen,\n ),\n time = _time,\n number = self._fnum,\n time_epoch = f'{_tsss}.{_tsus} seconds',\n len = _ilen,\n cap_len = _olen,\n )\n\n length = frame['cap_len']\n return self._decode_next_layer(frame, length)", "def next_file(self):\n raise NotImplementedError()", "def grab_frame(self):\n with self._buflock:\n if self._buffer is None:\n return None\n buf = self._buffer.tostring()\n return buf", "def next_packet(filename, memorymap=True):\n with open(filename, 'rb') as f:\n \n #memory map the file if necessary (prob requires 64 bit systems)\n _file = f\n if memorymap:\n _file = mmap.mmap(f.fileno(), 0, prot=mmap.PROT_READ)\n \n while True:\n packet = _file.read(TS.PACKET_SIZE)\n if packet:\n # first byte SHOULD be the sync byte\n # but if it isn't find one.\n if packet[0] != TS.SYNC_BYTE:\n start_byte = 0\n print packet[0]\n for i in range(start_byte, TS.PACKET_SIZE):\n if packet[i] == TS.SYNC_BYTE:\n start_byte = i\n break\n # didn't find a new start? 
FAIL\n if start_byte == 0:\n raise Exception(\"failure to find sync byte in ts packet size.\")\n continue\n remainder = _file.read(TS.PACKET_SIZE - start_byte)\n packet = packet[start_byte:] + remainder\n yield packet\n else:\n break", "def get_frame_sequence(captured_file):\n frame_seq = []\n get_all_frame = \"tshark -r {} -Y 'http.request || http.response' -T fields -e frame.number\".format(captured_file)\n frames = run_command(get_all_frame, True)\n for f in frames:\n fn = int(f.decode('utf8').rstrip('\\n'))\n frame_seq.append(HTTPNode(fn))\n \n return frame_seq", "def captureNextFrame(self):\r\n mainls = []\r\n\r\n\r\n ret, readFrame = self.capture.read()\r\n\r\n if (ret == True):\r\n self.currentFrame = cv2.cvtColor(readFrame, cv2.COLOR_BGR2RGB)\r\n self.faceDetection(self.currentFrame)\r\n self.currentFrame = self.bbFrame", "def _next_frame(self):\n ret, self.frame = self.capture.read()\n if not ret:\n self.logger.warning('Failed to read frame')\n if self.show_video:\n cv2.imshow('frame', self.frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n exit(0)\n return ret", "def next(self):\n result = None\n while result is None:\n if self._filehandle is None:\n if self.compressed:\n self._filehandle = gzip.GzipFile(self._filename, \"r\")\n else:\n self._filehandle = open(self._filename, \"r\")\n line = self._filehandle.next()\n line = line.rstrip()\n self._totallines += 1\n result = self.decodeline(line)\n return result", "def _read_next_bytes(\n fid, num_bytes, format_char_sequence, endian_character=\"<\"\n ):\n data = fid.read(num_bytes)\n return struct.unpack(endian_character + format_char_sequence, data)", "def next_frame(self):\n while True:\n if self.grabbed:\n buffer = self.__get_next_yuv_frame()\n if len(buffer) != self.__frame_raw_data_size:\n self.frame = False, False\n self.stopped = True\n break\n\n y, u, v = self.__extract_yuv_planes(buffer)\n\n # Save YUV planes now because they will be reshaped from (height, width) to (height, width, 1)\n\n converted_frame = self.__concatenate_planes_to_444yuv_frame(y, u, v)\n\n self.frame = True, converted_frame\n self.grabbed = False\n\n if self.stopped:\n break\n\n time.sleep(1/1000)", "def grab_next_frame(self):\n if Rescue_PI.input_video_file_path is None:\n self.orig_frame = self.vs.read()\n self.frame = self.orig_frame.copy()\n else:\n _, self.frame = self.vs.read()\n # self.frame = cv2.rotate(self.frame, cv2.ROTATE_180)\n if self.frame is None:\n pass\n else:\n self.frame = imutils.resize(self.frame, width=frame_width_in_pixels)", "def captureNextFrame(self):\n ret, readFrame=self.capture.read()\n if(ret==True):\n self.currentFrame=cv2.cvtColor(readFrame,cv2.COLOR_BGR2RGB)", "def parse_frames(self):\r\n done = False\r\n self._ip = 13 + self.ct_len\r\n while not done:\r\n code = self.next_byte()\r\n if not code:\r\n raise ValueError(\"Unexcepted end of file\")\r\n if code == b\"\\x2C\":\r\n self.parse_frame()\r\n elif code == b\"\\x21\":\r\n code = self.next_byte()\r\n if code == b\"\\xF9\":\r\n self.g_ext.append(self.parse_gce())\r\n elif code == b\"\\xFF\":\r\n self.next_byte()\r\n app = self.next_bytes(11)\r\n if app == b\"NETSCAPE2.0\":\r\n self.parse_ne()\r\n else:\r\n self.skip()\r\n elif code == b\"\\xFE\":\r\n self.comments.append(self.parse_ce())\r\n else:\r\n self.next_bytes(13)\r\n self.skip()\r\n elif code == b\"\\x3B\":\r\n done = True", "def read_next_bytes(fid, num_bytes, format_char_sequence, endian_character=\"<\"):\n data = fid.read(num_bytes)\n return struct.unpack(endian_character + format_char_sequence, data)", "def 
read_frame(self):\n return self.decode_frame(self.grab_frame())", "def getNextModFrame(self,name):\n return self.data.activeMod[name].modFramesNext[-1]", "def next(self):\n frame = self.xyzFile.nextFrame()\n if frame is None: return None\n \n newFrame = XYZFrame()\n newFrame.boxVectors = self.lattice.boxVectors\n refFrame = XYZFrame()\n refFrame.boxVectors = self.lattice.boxVectors\n atomsLists = self.propagateAtomsThroughPbc(frame.atoms, frame.boxSize)\n \n allAtoms = concatenate(atomsLists) \n posCount = len(atomsLists[0])\n \n match, referenceMatch, errors = self.match(atomsLists) \n for atomIndex in range(posCount):\n newFrame.atoms.append(XYZAtom(atomsLists[0][atomIndex].symbol\n , *self.lattice.positions[match[atomIndex]].x0))\n \n for atomIndex in range(posCount):\n refFrame.atoms.append(XYZAtom(allAtoms[referenceMatch[atomIndex]].__repr__())) \n refFrame.atoms[-1].x += 15\n \n for atomIndex in range(len(allAtoms)):\n refFrame.atoms.append(XYZAtom(allAtoms[atomIndex].__repr__())) \n refFrame.atoms[-1].x += 30\n \n return ProjectedFrame(newFrame, refFrame, errors)", "def get_still(self):\n _, frame = self.client.read()\n return frame", "async def get(self) -> RawArray:\r\n if self.empty():\r\n return None\r\n frame = self.frames[self._read_index]\r\n\r\n self._read_index = (self._read_index + 1) % self.capacity()\r\n self._is_full = False\r\n\r\n return frame", "def read(self):\n # return next frame in the queue\n return self.Q.get()", "def next_sequence(self):\n code, sequence = \"\", \"\"\n with open(self.ifilename, \"r\") as fr:\n for line in fr:\n line = line.strip()\n if line.startswith(\">\"): # is header line\n if code != \"\":\n # new sequence encountered, serve the previous one\n yield(code, header, sequence)\n header, code, sequence = line, _extract_code(line), sequence\n else:\n sequence += line\n # serve the last sequence\n yield(code, header, sequence)", "def __next__(self):\n while True:\n self.stream_bytes += self.stream_conn.read(1024)\n first = bytearray(self.stream_bytes).find(b'\\xff\\xd8')\n last = bytearray(self.stream_bytes).find(b'\\xff\\xd9')\n if first != -1 and last != -1:\n jpg = self.stream_bytes[first:last + 2]\n self.stream_bytes = self.stream_bytes[last + 2:]\n image = cv2.imdecode(np.fromstring(jpg, dtype=np.uint8), 0)\n self.total_frame += 1\n return image" ]
[ "0.658763", "0.6512409", "0.64798796", "0.63112426", "0.6304167", "0.6280922", "0.62393665", "0.6209724", "0.6112247", "0.6055701", "0.5982976", "0.5950312", "0.5921946", "0.59020406", "0.5859815", "0.58452123", "0.5819238", "0.5807083", "0.57893544", "0.5751586", "0.5741709", "0.5727332", "0.5725959", "0.57219684", "0.56841886", "0.5682388", "0.56439227", "0.5640948", "0.5637422", "0.56285053" ]
0.7203122
0
Builds a YUV frame from the 3 planes
def __concatenate_planes_to_444yuv_frame(self, y_plane, u_plane, v_plane):
    np.set_printoptions(formatter={'int': hex})
    y_plane.shape = (self.frame_height, self.frame_width, 1)
    u_plane.shape = (self.frame_height, self.frame_width, 1)
    v_plane.shape = (self.frame_height, self.frame_width, 1)
    yuv = np.concatenate((y_plane, u_plane, v_plane), axis=2)
    # Use OpenCV to convert color since the implementation is MUCH faster
    if self.__convert_to_bgr:
        yuv = cv.cvtColor(yuv, cv.COLOR_YUV2BGR)
    return yuv
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bytes_to_yuv(data, resolution):\n width, height = resolution\n fwidth, fheight = raw_resolution(resolution)\n y_len = fwidth * fheight\n uv_len = (fwidth // 2) * (fheight // 2)\n if len(data) != (y_len + 2 * uv_len):\n raise PiCameraValueError(\n 'Incorrect buffer length for resolution %dx%d' % (width, height))\n # Separate out the Y, U, and V values from the array\n a = np.frombuffer(data, dtype=np.uint8)\n Y = a[:y_len].reshape((fheight, fwidth))\n Uq = a[y_len:-uv_len].reshape((fheight // 2, fwidth // 2))\n Vq = a[-uv_len:].reshape((fheight // 2, fwidth // 2))\n # Reshape the values into two dimensions, and double the size of the\n # U and V values (which only have quarter resolution in YUV4:2:0)\n U = np.empty_like(Y)\n V = np.empty_like(Y)\n U[0::2, 0::2] = Uq\n U[0::2, 1::2] = Uq\n U[1::2, 0::2] = Uq\n U[1::2, 1::2] = Uq\n V[0::2, 0::2] = Vq\n V[0::2, 1::2] = Vq\n V[1::2, 0::2] = Vq\n V[1::2, 1::2] = Vq\n # Stack the channels together and crop to the actual resolution\n return np.dstack((Y, U, V))[:height, :width]", "def rgb2yuv(r, g, b, mode='444'):\n r = 255 * r\n g = 255 * g\n b = 255 * b\n y = 00.257 * r + 0.504 * g + 0.098 * b + 16\n u = -0.148 * r - 0.291 * g + 0.439 * b + 128\n v = 00.439 * r - 0.368 * g - 0.071 * b + 128\n if mode == '420':\n y, u, v = YUV_change_mode(y, u, v, '444to420')\n return (y / 255), (u / 255), (v / 255)", "def YUV_change_mode(y, u, v, direction='420to444'):\n if direction == '420to444':\n u = np.array([cv2.resize(ch, (u.shape[2] * 2, u.shape[1] * 2), interpolation=cv2.INTER_CUBIC) for ch in u])\n v = np.array([cv2.resize(ch, (v.shape[2] * 2, v.shape[1] * 2), interpolation=cv2.INTER_CUBIC) for ch in v])\n if direction == '444to420':\n u = np.array([cv2.resize(ch, (u.shape[2] // 2, u.shape[1] // 2), interpolation=cv2.INTER_CUBIC) for ch in u])\n v = np.array([cv2.resize(ch, (v.shape[2] // 2, v.shape[1] // 2), interpolation=cv2.INTER_CUBIC) for ch in v])\n return y, u, v", "def concat_ortho_planes(planes: Sequence[np.ndarray]) -> np.ndarray:\n assert len(planes) == 3\n\n h_yx, w_yx = planes[0].shape[0], planes[0].shape[1]\n h_zx, w_zx = planes[1].shape[0], planes[1].shape[1]\n h_zy, w_zy = planes[2].shape[1], planes[2].shape[0]\n\n assert h_yx == h_zy\n assert w_yx == w_zx\n assert h_zx == w_zy\n\n height = h_yx + 1 + h_zx\n width = w_yx + 1 + w_zy\n channel = planes[0].shape[2:]\n ret = np.zeros((height, width) + channel, dtype=planes[0].dtype)\n\n # Insert yx plane in top left.\n ret[:h_yx, :w_yx] = planes[0]\n # Insert zx plane in bottom left.\n ret[-h_zx:, :w_zx] = planes[1]\n # Insert zy plane in top right, swap to align y-axis with main yx panel.\n ret[:h_zy, -w_zy:] = np.swapaxes(planes[2], 0, 1)\n\n return ret", "def rgb_to_yuv(img_rgb):\n\n r = img_rgb[..., 0]\n g = img_rgb[..., 1]\n b = img_rgb[..., 2]\n\n y = 0.299 * r + 0.587 * g + 0.114 * b\n u = 0.493 * (b - y)\n v = 0.877 * (r - y)\n\n img_yuv = np.stack((y, u, v), axis=2)\n return img_yuv", "def YUVwrite(y, u, v, path):\n if len(np.shape(y)) == 3:\n frame_num = np.shape(y)[0]\n with open(path, 'wb') as file:\n for fn in range(frame_num):\n file.write(y[fn].tobytes())\n file.write(u[fn].tobytes())\n file.write(v[fn].tobytes())\n else:\n with open(path, 'wb') as file:\n file.write(y.tobytes())\n file.write(u.tobytes())\n file.write(v.tobytes())", "def _preprocess(self, ob):\n # Take the max over prev and current frames.\n if self.last_frame is not None:\n ob_comb = np.maximum(ob, self.last_frame)\n else:\n ob_comb = ob\n self.last_frame = ob\n\n # Convert to YUV, extract Y, resize, 
and crop.\n r, g, b = ob_comb[:, :, 0], ob_comb[:, :, 1], ob_comb[:, :, 2]\n y = 0.299 * r + 0.587 * g + 0.114 * b\n y_resized = cv2.resize(y, (84, 110), interpolation=cv2.INTER_LINEAR)\n y_cropped = y_resized[13:-13, :]\n return y_cropped", "def next_frame(self):\n while True:\n if self.grabbed:\n buffer = self.__get_next_yuv_frame()\n if len(buffer) != self.__frame_raw_data_size:\n self.frame = False, False\n self.stopped = True\n break\n\n y, u, v = self.__extract_yuv_planes(buffer)\n\n # Save YUV planes now because they will be reshaped from (height, width) to (height, width, 1)\n\n converted_frame = self.__concatenate_planes_to_444yuv_frame(y, u, v)\n\n self.frame = True, converted_frame\n self.grabbed = False\n\n if self.stopped:\n break\n\n time.sleep(1/1000)", "def ycbcr_to_yuv(ycbcr, bit_depth=10):\n\n bit_multi = 2 ** (bit_depth - 8)\n y_coef = 219 * bit_multi\n y_offset = 16 * bit_multi\n cbcr_coef = 224 * bit_multi\n cbcr_offset = 128 * bit_multi\n\n ycbcr_tmp = ycbcr.copy()\n ycbcr_tmp[:, 0] = (ycbcr_tmp[:, 0] - y_offset) / y_coef\n ycbcr_tmp[:, 1] = (ycbcr_tmp[:, 1] - cbcr_offset) / cbcr_coef\n ycbcr_tmp[:, 2] = (ycbcr_tmp[:, 2] - cbcr_offset) / cbcr_coef\n\n return ycbcr_tmp", "def rgb2yuv(image):\n return cv2.cvtColor(image, cv2.COLOR_RGB2YUV)", "def prepare_data(cameras, frame_points_3d, frame_points_2d, keyframe_idx):\n camera_params = np.empty((0, 9))\n for c in cameras:\n R, _ = cv2.Rodrigues(c.R_mat)\n camera = build_camera(R, c.t)\n camera_params = np.append(camera_params, [camera], axis=0)\n\n camera_indices = []\n point_indices = []\n points_2d = np.empty((0, 2))\n points_3d = np.empty((0, 3))\n\n camera_id = 0\n pt_id_counter = 0\n for k, pts_2d in enumerate(frame_points_2d):\n if k > 0:\n halfway_idx = keyframe_idx[k] - keyframe_idx[k - 1] - 1\n points_2d = np.vstack((points_2d, frame_points_2d[k-1][halfway_idx]))\n points_3d = np.vstack((points_3d, frame_points_3d[k-1][halfway_idx]))\n camera_indices += [camera_id for _ in range(len(frame_points_2d[k-1][halfway_idx]))]\n point_indices += [i for i in range(pt_id_counter, pt_id_counter + len(frame_points_2d[k-1][halfway_idx]))]\n pt_id_counter = pt_id_counter + len(frame_points_2d[k-1][halfway_idx])\n\n if k > 1:\n end_idx = keyframe_idx[k + 1] - keyframe_idx[k - 1] - 3\n points_2d = np.vstack((points_2d, frame_points_2d[k-2][end_idx]))\n points_3d = np.vstack((points_3d, frame_points_3d[k-2][end_idx]))\n camera_indices += [camera_id for _ in range(len(frame_points_2d[k-2][end_idx]))]\n point_indices += [i for i in range(pt_id_counter, pt_id_counter + len(frame_points_2d[k-2][end_idx]))]\n pt_id_counter = pt_id_counter + len(frame_points_2d[k-2][end_idx])\n\n points_2d = np.vstack((points_2d, frame_points_2d[k][0]))\n points_3d = np.vstack((points_3d, frame_points_3d[k][0]))\n camera_indices += [camera_id for _ in range(pts_2d.shape[1])]\n point_indices += [i for i in range(pt_id_counter, pt_id_counter + pts_2d.shape[1])]\n\n camera_id += 1\n pt_id_counter = pt_id_counter + pts_2d.shape[1]\n\n return camera_params, np.asarray(camera_indices), np.asarray(point_indices), points_3d, points_2d", "def make_movie(field='uu1', datadir='data/', proc=-1, extension='xz',\n format='native', tmin=0., tmax=1.e38, amin=0., amax=1.,\n transform='', oldfile=False):\n\n import pylab as plt\n\n datadir = os.path.expanduser(datadir)\n if proc < 0:\n filename = datadir + '/slice_' + field + '.' + extension\n else:\n filename = datadir + '/proc' + \\\n str(proc) + '/slice_' + field + '.' 
+ extension\n\n # Read the global dimensions.\n dim = read_dim(datadir, proc)\n if dim.precision == 'D':\n precision = 'd'\n else:\n precision = 'f'\n\n # Set up slice plane.\n if extension == 'xy' or extension == 'Xy':\n hsize = dim.nx\n vsize = dim.ny\n if extension == 'xz':\n hsize = dim.nx\n vsize = dim.nz\n if extension == 'yz':\n hsize = dim.ny\n vsize = dim.nz\n plane = np.zeros((vsize, hsize), dtype=precision)\n\n infile = npfile(filename, endian=format)\n\n files = []\n fig = plt.figure(figsize=(5, 10))\n ax = fig.add_subplot(111)\n\n ifirst = True\n islice = 0\n while True:\n try:\n raw_data = infile.fort_read(precision)\n except ValueError:\n break\n except TypeError:\n break\n\n if oldfile:\n t = raw_data[-1]\n plane = raw_data[:-1].reshape(vsize, hsize)\n else:\n t = raw_data[-2]\n plane = raw_data[:-2].reshape(vsize, hsize)\n\n if transform:\n exec('plane = plane' + transform)\n\n if t > tmin and t < tmax:\n ax.cla()\n ax.imshow(plane, vmin=amin, vmax=amax)\n fname = '_tmp%03d.png' % islice\n print('Saving frame' + fname)\n fig.savefig(fname)\n files.append(fname)\n\n if ifirst:\n #print \"----islice----------t---------min-------max-------delta\" # Python 2\n print(\"----islice----------t---------min-------max-------delta\")\n #print \"%10i %10.3e %10.3e %10.3e %10.3e\" % \\ # Python 2\n #(islice, t, plane.min(), plane.max(), plane.max() - plane.min()) # Python 2\n print(\"{0:10} {1:10.3e} {2:10.3e} {3:10.3e} {4:10.3e}\".format(islice, t, plane.min(), plane.max(), plane.max() - plane.min()))\n\n ifirst = False\n islice += 1\n\n #print 'Making movie animation.mpg - this make take a while'\n print('Making movie animation.mpg - this make take a while')\n # SC: Not all systems use mencoder. Need to change this into ffmpeg.\n os.system(\"mencoder 'mf://_tmp*.png' -mf type=png:fps=24 -ovc lavc -lavcopts vcodec=wmv2 -oac copy -o animation.mpg\")\n os.system(\"rm _tmp*.png\")\n infile.close()", "def convert_yuv_to_rgb(img_arr): \n rgb = cv2.cvtColor(img_arr, cv2.COLOR_YUV2BGR_I420)\n rgb = cv2.cvtColor(rgb, cv2.COLOR_BGR2RGB)\n return Image.fromarray(rgb)", "def construct_uvn_frame(n, u, b=None, flip_to_match_image=True):\n\n n = normalized(n)\n u = normalized(np.array(u) - np.dot(n, u) * n)\n v = normalized_cross(n, u)\n\n # flip to match image orientation\n if flip_to_match_image:\n if abs(u[1]) > abs(v[1]):\n u, v = v, u\n if u[0] < 0:\n u = np.negative(u)\n if v[1] < 0:\n v = np.negative(v)\n if b is None:\n if n[2] < 0:\n n = np.negative(n)\n else:\n if np.dot(n, b) > 0:\n n = np.negative(n)\n\n # return uvn matrix, column major\n return np.matrix([\n [u[0], v[0], n[0]],\n [u[1], v[1], n[1]],\n [u[2], v[2], n[2]],\n ])", "def _CropOneFrame(yuv_file, output_file, component_sizes):\n for comp_width, comp_height, comp_crop_height in component_sizes:\n for row in range(comp_height):\n # Read the plane data for this row.\n yuv_plane = yuv_file.read(comp_width)\n\n # If the plane is empty, we have reached the end of the file.\n if yuv_plane == \"\":\n return False\n\n # Only write the plane data for the rows bigger than crop_height.\n if row >= comp_crop_height:\n output_file.write(yuv_plane)\n return True", "def __init__(self, channel_id, width, height, entype, ctx,\n output_format=const.PIXEL_FORMAT_YUV_SEMIPLANAR_420):\n self._channel_id = channel_id\n self._width = width\n self._height = height\n self._run_flag = True\n self._callbak_tid = None\n self._channel_desc = None\n self._ctx = ctx\n self._entype = entype\n self._format = output_format\n self._decode_complete_cnt = 0\n 
self._decode_cnt = 0\n self._output_pic_size = (self._width * self._height * 3) // 2\n self._frame_queue = queue.Queue()\n self._frame_config = None", "def recreateVideoFromLapPyr(pyrVideo): \n maxLevel=len(pyrVideo)\n fNumber, H, W, chNum=pyrVideo[0].shape\n videoResult=np.zeros(pyrVideo[0].shape, dtype=np.float32)\n for fn in range(videoResult.shape[0]):\n framePyr=[pyrVideo[i][fn] for i in range(maxLevel)]\n videoResult[fn]=recreateImgsFromLapPyr(framePyr)\n \n return videoResult", "def preprocess(image):\n image = rgb2yuv(image)\n return image", "def _prepare_plane(self):\n verticies = [\n # main plane - note that the mainplane is scaled so the mat_plane\n # matrix will it transform to the correct coordinates\n -self.i_border[0]/self._scaling[0], self.i_border[1]/self._scaling[1],\n -self.i_border[0]/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], -(self.o_wh[1]-self.i_border[1])/self._scaling[1],\n (self.o_wh[0]-self.i_border[0])/self._scaling[0], self.i_border[1]/self._scaling[1],\n -self.i_border[0]/self._scaling[0], self.i_border[1]/self._scaling[1],\n\n # coord plane\n 0, 0,\n 0, -self.o_wh[1],\n self.o_wh[0], -self.o_wh[1],\n self.o_wh[0], -self.o_wh[1],\n self.o_wh[0], 0,\n 0, 0,\n\n # axes\n 0, -self.o_wh[1], self.o_wh[0], -self.o_wh[1], #x\n 0, 0, 0, -self.o_wh[1], #y\n ]\n\n colors = [\n 1.0, 1.0, 1.0, 1.0, # outer box XXX Remove outer box...\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n 1.0, 1.0, 1.0, 1.0,\n .9, .9, .9, 9.0, # plot box\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n .9, .9, .9, 9.0,\n 0.0, 0.0, 0.0, 1.0, #lines\n 0.0, 0.0, 0.0, 1.0,\n 0.0, 0.0, 0.0, 1.0,\n 0.0, 0.0, 0.0, 1.0,\n ]\n\n self._fonts = []\n for u in range(1, self._unit_count[0]+1):\n verticies.append(self._unit_w[0]*u)\n verticies.append(-self.o_wh[1]+0.02)\n verticies.append(self._unit_w[0]*u)\n verticies.append(-self.o_wh[1]-0.02)\n colors += [0.0, 0.0, 0.0, 1.0]\n colors += [0.0, 0.0, 0.0, 1.0]\n self._fonts.append([\n '{:.2f}'.format(u*(self.i_axis[0]/self._unit_count[0])-self.i_origin[0]),\n (self._unit_w[0]*u+self.i_border[0]-0.05)*self._scaling[0],\n (-self.o_wh[1]+(self.i_border[3])*0.5)\n ])\n for u in range(0, self._unit_count[1]):\n verticies.append(0.02)\n verticies.append(-self._unit_w[1]*u)\n verticies.append(-0.02)\n verticies.append(-self._unit_w[1]*u)\n colors += [0.0, 0.0, 0.0, 1.0]\n colors += [0.0, 0.0, 0.0, 1.0]\n self._fonts.append([\n '{:.2f}'.format(self.i_axis[1]-u*self.i_axis[1]/self._unit_count[1]-self.i_origin[1]),\n (0.025)*self._scaling[0],\n (-(self._unit_w[1])*u-self.i_border[1]+0.01)*self._scaling[1]\n ])\n\n self._draw_plane_indicies = (0, 12)\n self._draw_line_indicies = (12, 4+self._unit_count[0]*2+self._unit_count[1]*2)\n\n # convert data into valid data format\n verticies = numpy.array(verticies, dtype=numpy.float32)\n colors = numpy.array(colors, dtype=numpy.float32)\n\n self._plane_vao = util.VAO()\n self._plane_vbo = util.VBO(2)\n\n with self._plane_vao:\n # plane verticies\n with self._plane_vbo.get(0):\n glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(verticies), verticies, GL_STATIC_DRAW)\n glVertexAttribPointer(self.plane_shader.attributeLocation('vertex_position'), 2, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(0)\n\n # place vertex colors\n with self._plane_vbo.get(1):\n 
glBufferData(GL_ARRAY_BUFFER, ArrayDatatype.arrayByteCount(colors), colors, GL_STATIC_DRAW)\n glVertexAttribPointer(self.plane_shader.attributeLocation('vertex_color'), 4, GL_FLOAT, GL_FALSE, 0, None)\n glEnableVertexAttribArray(1)", "def parseY4MHeader(y4m):\n w = 0; h = 0; fps_num = 0; fps_denom = 0; fr = 0; fmt = \"420\"; bit_depth = 8;\n #print(\"parsing \" + y4m)\n with open(y4m, 'rb') as f:\n line = f.readline().decode('utf-8')\n #YUV4MPEG2 W4096 H2160 F30000:1001 Ip A0:0 C420p10 XYSCSS=420P10\n m = re.search(r\"W([0-9]+) H([0-9]+) F([0-9]+)\\:([0-9]+)\", line)\n if m:\n w = int(m.group(1))\n h = int(m.group(2))\n fps_num = float(m.group(3))\n fps_denom = float(m.group(4))\n fps = round(fps_num / fps_denom)\n m = re.search(r\"C([0-9]+)p([0-9]+)\", line)\n if m:\n fmt = m.group(1)\n bit_depth = int(m.group(2))\n if w == 0 or h == 0 or fps == 0:\n print(\"Failed to parse the input y4m file!\\n\")\n sys.exit()\n return (w, h, fps_num, fps_denom, fps, fmt, bit_depth)", "def yuv(self):\n r, g, b = self.rgb\n y = 0.299 * r + 0.587 * g + 0.114 * b\n return (\n y,\n 0.492 * (b - y),\n 0.877 * (r - y),\n )", "def from_planes(cls, network, layer_index, planes, labels):\n transformed = network.transform_planes(planes,\n compute_preimages=True,\n include_post=False)\n all_inputs = []\n all_labels = []\n for upolytope, label in zip(transformed, labels):\n # include_post=False so the upolytope is just a list of Numpy\n # arrays.\n points = []\n for vertices in upolytope:\n points.extend(vertices)\n # Remove duplicate points.\n points = list(set(map(tuple, points)))\n all_inputs.extend(points)\n all_labels.extend([label for i in range(len(points))])\n all_inputs, indices = np.unique(all_inputs, return_index=True, axis=0)\n all_labels = np.array(all_labels)[indices]\n return cls(network, layer_index, all_inputs, all_labels)", "def bgr_to_yuv_channels(matrix):\n yuv_matrix = cv2.cvtColor(matrix, cv2.COLOR_BGR2YUV)\n return cv2.split(yuv_matrix)", "def CropFrames(yuv_file_name, output_file_name, width, height, crop_height):\n # Component sizes = [Y_sizes, U_sizes, V_sizes].\n component_sizes = [(width, height, crop_height),\n (width/2, height/2, crop_height/2),\n (width/2, height/2, crop_height/2)]\n\n yuv_file = open(yuv_file_name, 'rb')\n output_file = open(output_file_name, 'wb')\n\n data_left = True\n while data_left:\n data_left = _CropOneFrame(yuv_file, output_file, component_sizes)\n\n yuv_file.close()\n output_file.close()", "def collate_frame_gru_fn(data):\n # Sort a data list by caption length\n if len(data[0]) == 10:\n if data[0][1] is not None:\n data.sort(key=lambda x: len(x[1]), reverse=True)\n videos, captions, cap_bows, idxs, cap_ids, video_ids, videos_target, video_ids_target, cap_tensor_target, cap_bow_target= zip(*data)\n\n # Merge videos (convert tuple of 1D tensor to 4D tensor)\n video_lengths = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos]\n\n frame_vec_len = len(videos[0][0])\n vidoes = torch.zeros(len(videos), max(video_lengths), frame_vec_len)\n videos_origin = torch.zeros(len(videos), frame_vec_len)\n vidoes_mask = torch.zeros(len(videos), max(video_lengths))\n for i, frames in enumerate(videos):\n end = video_lengths[i]\n vidoes[i, :end, :] = frames[:end,:]\n videos_origin[i,:] = torch.mean(frames,0)\n vidoes_mask[i,:end] = 1.0\n\n video_lengths_target = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos_target]\n\n frame_vec_len = len(videos_target[0][0])\n vidoes_target = torch.zeros(len(videos_target), max(video_lengths_target), frame_vec_len)\n videos_origin_target 
= torch.zeros(len(videos_target), frame_vec_len)\n vidoes_mask_target = torch.zeros(len(videos_target), max(video_lengths_target))\n for i, frames in enumerate(videos_target):\n end = video_lengths_target[i]\n vidoes_target[i, :end, :] = frames[:end,:]\n videos_origin_target[i,:] = torch.mean(frames,0)\n vidoes_mask_target[i,:end] = 1.0\n \n if captions[0] is not None:\n # Merge captions (convert tuple of 1D tensor to 2D tensor)\n lengths = [len(cap) for cap in captions]\n target = torch.zeros(len(captions), max(lengths)).long()\n words_mask = torch.zeros(len(captions), max(lengths))\n for i, cap in enumerate(captions):\n end = lengths[i]\n target[i, :end] = cap[:end]\n words_mask[i, :end] = 1.0\n else:\n target = None\n lengths = None\n words_mask = None\n\n if cap_tensor_target[0] is not None:\n # Merge captions (convert tuple of 1D tensor to 2D tensor)\n lengths_target = [len(cap) for cap in cap_tensor_target]\n target_target = torch.zeros(len(cap_tensor_target), max(lengths_target)).long()\n words_mask_target = torch.zeros(len(cap_tensor_target), max(lengths_target))\n for i, cap in enumerate(cap_tensor_target):\n end = lengths_target[i]\n target_target[i, :end] = cap[:end]\n words_mask_target[i, :end] = 1.0\n else:\n target_target = None\n lengths_target = None\n words_mask_target = None\n\n\n cap_bows = torch.stack(cap_bows, 0) if cap_bows[0] is not None else None\n\n cap_bow_target = torch.stack(cap_bow_target, 0) if cap_bow_target[0] is not None else None\n\n video_data = (vidoes, videos_origin, video_lengths, vidoes_mask)\n text_data = (target, cap_bows, lengths, words_mask)\n text_data_target = (target_target, cap_bow_target, lengths_target, words_mask_target)\n video_data_target = (vidoes_target, videos_origin_target, video_lengths_target, vidoes_mask_target)\n\n return video_data, text_data, idxs, cap_ids, video_ids, video_ids_target, video_data_target, text_data_target\n\n elif len(data[0]) == 14:\n if data[0][1] is not None:\n data.sort(key=lambda x: len(x[1]), reverse=True)\n videos, captions, cap_bows, idxs, cap_ids, video_ids, videos_target, video_ids_target, cap_tensor_target, cap_bow_target, videos_source2, video_ids_source2, cap_tensor_source2, cap_bow_source2= zip(*data)\n\n # Merge videos (convert tuple of 1D tensor to 4D tensor)\n video_lengths = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos]\n\n frame_vec_len = len(videos[0][0])\n vidoes = torch.zeros(len(videos), max(video_lengths), frame_vec_len)\n videos_origin = torch.zeros(len(videos), frame_vec_len)\n vidoes_mask = torch.zeros(len(videos), max(video_lengths))\n for i, frames in enumerate(videos):\n end = video_lengths[i]\n vidoes[i, :end, :] = frames[:end,:]\n videos_origin[i,:] = torch.mean(frames,0)\n vidoes_mask[i,:end] = 1.0\n\n video_lengths_target = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos_target]\n\n frame_vec_len = len(videos_target[0][0])\n vidoes_target = torch.zeros(len(videos_target), max(video_lengths_target), frame_vec_len)\n videos_origin_target = torch.zeros(len(videos_target), frame_vec_len)\n vidoes_mask_target = torch.zeros(len(videos_target), max(video_lengths_target))\n for i, frames in enumerate(videos_target):\n end = video_lengths_target[i]\n vidoes_target[i, :end, :] = frames[:end,:]\n videos_origin_target[i,:] = torch.mean(frames,0)\n vidoes_mask_target[i,:end] = 1.0\n\n video_lengths_source2 = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos_source2]\n\n frame_vec_len = len(videos_source2[0][0])\n vidoes_source2 = torch.zeros(len(videos_source2), 
max(video_lengths_source2), frame_vec_len)\n videos_origin_source2 = torch.zeros(len(videos_source2), frame_vec_len)\n vidoes_mask_source2 = torch.zeros(len(videos_source2), max(video_lengths_source2))\n for i, frames in enumerate(videos_source2):\n end = video_lengths_source2[i]\n vidoes_source2[i, :end, :] = frames[:end,:]\n videos_origin_source2[i,:] = torch.mean(frames,0)\n vidoes_mask_source2[i,:end] = 1.0\n\n if captions[0] is not None:\n # Merge captions (convert tuple of 1D tensor to 2D tensor)\n lengths = [len(cap) for cap in captions]\n target = torch.zeros(len(captions), max(lengths)).long()\n words_mask = torch.zeros(len(captions), max(lengths))\n for i, cap in enumerate(captions):\n end = lengths[i]\n target[i, :end] = cap[:end]\n words_mask[i, :end] = 1.0\n else:\n target = None\n lengths = None\n words_mask = None\n\n if cap_tensor_target[0] is not None:\n # Merge captions (convert tuple of 1D tensor to 2D tensor)\n lengths_target = [len(cap) for cap in cap_tensor_target]\n target_target = torch.zeros(len(cap_tensor_target), max(lengths_target)).long()\n words_mask_target = torch.zeros(len(cap_tensor_target), max(lengths_target))\n for i, cap in enumerate(cap_tensor_target):\n end = lengths_target[i]\n target_target[i, :end] = cap[:end]\n words_mask_target[i, :end] = 1.0\n else:\n target_target = None\n lengths_target = None\n words_mask_target = None\n\n if cap_tensor_source2[0] is not None:\n # Merge captions (convert tuple of 1D tensor to 2D tensor)\n lengths_source2 = [len(cap) for cap in cap_tensor_source2]\n target_source2 = torch.zeros(len(cap_tensor_source2), max(lengths_source2)).long()\n words_mask_source2 = torch.zeros(len(cap_tensor_source2), max(lengths_source2))\n for i, cap in enumerate(cap_tensor_source2):\n end = lengths_source2[i]\n target_source2[i, :end] = cap[:end]\n words_mask_source2[i, :end] = 1.0\n else:\n target_source2 = None\n lengths_source2 = None\n words_mask_source2 = None\n\n cap_bows = torch.stack(cap_bows, 0) if cap_bows[0] is not None else None\n cap_bow_target = torch.stack(cap_bow_target, 0) if cap_bow_target[0] is not None else None\n cap_bow_source2 = torch.stack(cap_bow_source2, 0) if cap_bow_source2[0] is not None else None\n\n video_data = (vidoes, videos_origin, video_lengths, vidoes_mask)\n text_data = (target, cap_bows, lengths, words_mask)\n text_data_target = (target_target, cap_bow_target, lengths_target, words_mask_target)\n video_data_target = (vidoes_target, videos_origin_target, video_lengths_target, vidoes_mask_target)\n text_data_source2 = (target_source2, cap_bow_source2, lengths_source2, words_mask_source2)\n video_data_source2 = (vidoes_source2, videos_origin_source2, video_lengths_source2, vidoes_mask_source2)\n\n\n return video_data, text_data, idxs, cap_ids, video_ids, video_ids_target, video_data_target, text_data_target, video_ids_source2, video_data_source2, text_data_source2\n\n\n else:\n if data[0][1] is not None:\n data.sort(key=lambda x: len(x[1]), reverse=True)\n videos, captions, cap_bows, idxs, cap_ids, video_ids = zip(*data)\n\n # Merge videos (convert tuple of 1D tensor to 4D tensor)\n video_lengths = [min(VIDEO_MAX_LEN,len(frame)) for frame in videos]\n frame_vec_len = len(videos[0][0])\n vidoes = torch.zeros(len(videos), max(video_lengths), frame_vec_len)\n videos_origin = torch.zeros(len(videos), frame_vec_len)\n vidoes_mask = torch.zeros(len(videos), max(video_lengths))\n for i, frames in enumerate(videos):\n end = video_lengths[i]\n vidoes[i, :end, :] = frames[:end,:]\n videos_origin[i,:] = 
torch.mean(frames,0)\n vidoes_mask[i,:end] = 1.0\n \n if captions[0] is not None:\n # Merge captions (convert tuple of 1D tensor to 2D tensor)\n lengths = [len(cap) for cap in captions]\n target = torch.zeros(len(captions), max(lengths)).long()\n words_mask = torch.zeros(len(captions), max(lengths))\n for i, cap in enumerate(captions):\n end = lengths[i]\n target[i, :end] = cap[:end]\n words_mask[i, :end] = 1.0\n else:\n target = None\n lengths = None\n words_mask = None\n\n\n cap_bows = torch.stack(cap_bows, 0) if cap_bows[0] is not None else None\n\n video_data = (vidoes, videos_origin, video_lengths, vidoes_mask)\n text_data = (target, cap_bows, lengths, words_mask)\n\n return video_data, text_data, idxs, cap_ids, video_ids", "def planes_3d(self, quantity, xplane, yplane):\n xplane = int(xplane)\n yplane = int(yplane)\n # Get the scalar values\n # Get the data on the plane with a fixed x value. These means we'll\n # have changing (y, z) points\n xdata = self.get_plane(quantity, 'yz', xplane)\n # z first cuz we want y to be changing before z to correspond with the\n # way numpy flattens arrays. Note this means y points will be in the\n # 2nd column\n xplanepoints = np.array(list(itertools.product(self.Z, self.Y)))\n xdata = xdata.flatten()\n xplanexval = np.array(list(itertools.repeat(x[xplane], len(xdata))))\n xplanedata = np.zeros((xplanepoints.shape[0], 4))\n xplanedata[:, 0] = xplanexval\n xplanedata[:, 1] = xplanepoints[:, 1]\n xplanedata[:, 2] = xplanepoints[:, 0]\n xplanedata[:, 3] = xdata\n # Same procedure for fixed y plane\n ydata = self.get_plane(quantity, 'xz', yplane)\n yplanepoints = np.array(list(itertools.product(z, x)))\n ydata = ydata.flatten()\n yplaneyval = np.array(list(itertools.repeat(y[yplane], len(ydata))))\n yplanedata = np.zeros((yplanepoints.shape[0], 4))\n yplanedata[:, 0] = yplanepoints[:, 1]\n yplanedata[:, 1] = yplaneyval\n yplanedata[:, 2] = yplanepoints[:, 0]\n yplanedata[:, 3] = ydata\n labels = ('X [um]', 'Y [um]', 'Z [um]', quantity)\n # Now stack them vertically and plot!\n all_data = np.vstack((xplanedata, yplanedata))\n self.scatter3d(all_data[:, 0], all_data[:, 1], all_data[:, 2],\n all_data[:, 3], labels, 'planes_3d')", "def make_movie_crossflow(field='uu1', datadir='data/', proc=-1, extension='yz',\n format='native', tmin=0., tmax=1.e38, amin=0., amax=1.,\n transform='', oldfile=False):\n\n import pylab as plt\n import matplotlib.patches as patches\n\n datadir = os.path.expanduser(datadir)\n if proc < 0:\n filename = datadir + '/slice_' + field + '.' + extension\n else:\n filename = datadir + '/proc' + \\\n str(proc) + '/slice_' + field + '.' 
+ extension\n\n # Read the global dimensions.\n dim = read_dim(datadir, proc)\n if dim.precision == 'D':\n precision = 'd'\n else:\n precision = 'f'\n\n # Set up slice plane.\n if extension == 'xy' or extension == 'Xy':\n hsize = dim.nx\n vsize = dim.ny\n if extension == 'xz':\n hsize = dim.nx\n vsize = dim.nz\n if extension == 'yz':\n hsize = dim.ny\n vsize = dim.nz\n plane = np.zeros((vsize, hsize), dtype=precision)\n\n infile = npfile(filename, endian=format)\n\n files = []\n fig = plt.figure(figsize=(5, 10))\n ax = fig.add_subplot(111)\n ax.add_patch(patches.Rectangle(\n (220,0),\n 40,\n 320,\n color='gray'\n )\n )\n#\n# ax.add_patch(patches.Rectangle(\n# (220,0),\n# 80,\n# 240,\n# hatch='/'\n# )\n# )\n\n ifirst = True\n islice = 0\n while True:\n try:\n raw_data = infile.fort_read(precision)\n except ValueError:\n break\n except TypeError:\n break\n\n if oldfile:\n t = raw_data[-1]\n plane = raw_data[:-1].reshape(vsize, hsize)\n else:\n t = raw_data[-2]\n plane = raw_data[:-2].reshape(vsize, hsize)\n\n if transform:\n exec('plane = plane' + transform)\n\n if t > tmin and t < tmax:\n ax.cla()\n ax.imshow(plane, vmin=amin, vmax=amax)\n ax.add_patch(patches.Rectangle(\n (220,0),\n 40,\n 320,\n color='gray'\n )\n )\n fname = '_tmp%03d.png' % islice\n print('Saving frame' + fname)\n fig.savefig(fname)\n files.append(fname)", "def __get_next_yuv_frame(self):\n raw_frame_buffer = self.__file_object.read(self.__frame_raw_data_size)\n\n # Ignore FRAME header\n self.__file_object.readline()\n return raw_frame_buffer", "def CreateFrame (audioSampleX, audioSampleY, statusList, statusCursor, bitDepth):\n if bitDepth == 3:\n if statusCursor == 0:\n FirstSubFrame = [b'0xD8', audioSampleX, 1, 0, int(statusList[statusCursor]), 1]\n else:\n FirstSubFrame = [b'0xD2', audioSampleX, 1, 0, int(statusList[statusCursor]), 1]\n SubFrameY = [b'0xD4', audioSampleY, 1, 0, int(statusList[statusCursor]), 1]\n Frame = [FirstSubFrame, SubFrameY]\n else: # Frame with less than 24 bits sample\n if statusCursor == 0:\n FirstSubFrame = [b'0xD8', b'0x0', audioSampleX, 1, 0, int(statusList[statusCursor]), 1]\n else:\n FirstSubFrame = [b'0xD2', b'0x0', audioSampleX, 1, 0, int(statusList[statusCursor]), 1]\n SubFrameY = [b'0xD4', b'0x0', audioSampleY, 1, 0, int(statusList[statusCursor]), 1]\n Frame = [FirstSubFrame, SubFrameY]\n print(Frame)\n return Frame", "def buildVideoLapPyr(frames, maxLevel): \n pyr0=buildLaplacianPyramid(frames[0], maxLevel)\n realMaxLevel=len(pyr0)\n \n resultList=[]\n for i in range(realMaxLevel):\n curPyr = np.zeros([len(frames)]+list(pyr0[i].shape), dtype=np.float32)\n resultList.append(curPyr)\n \n for fn in range(len(frames)):\n pyrOfFrame = buildLaplacianPyramid(frames[fn], maxLevel)\n for i in range(realMaxLevel):\n resultList[i][fn]=pyrOfFrame[i]\n \n return resultList" ]
[ "0.6178987", "0.6112994", "0.60437536", "0.60061175", "0.5818813", "0.5782524", "0.5594121", "0.5592523", "0.55896294", "0.5515905", "0.54654443", "0.54456806", "0.5360084", "0.5344593", "0.5332781", "0.5281292", "0.52807987", "0.5253911", "0.5192978", "0.51899076", "0.5174774", "0.51724017", "0.51544124", "0.51364696", "0.5127803", "0.50986224", "0.5092052", "0.5074001", "0.5068511", "0.5054257" ]
0.70961094
0
Class to train and evaluate a Base Cluster Class with Number of Clusters Specified evaluate_by = column name to use to compare across the clusters eventually
def __init__(self, X, n_clusters=2, evaluate_by=None, scaled=True, random_state=101, space=None, const_params=None, loss_fun=None):
    self.evaluate_by = evaluate_by
    if (self.evaluate_by is not None):
        self.evaluate_by_values = X[self.evaluate_by]
        self.X = X.helper.drop_columns([self.evaluate_by])
    else:
        self.X = X
    self.n_clusters = n_clusters
    self.scaled = scaled
    self.random_state = random_state
    self.space = space
    self.const_params = const_params
    self.loss_fun = loss_fun
    self.objective = None
    self.best_params = None
    self.cluster_obj = None  # Define in child class
    self.labels = None
    self.silhoutte_score = None
    self.merged_data = None
    self.merged_scaled_data = None
    self.columns = self.X.columns
    std_scl = StandardScaler()
    self.X_scaled = pd.DataFrame(std_scl.fit_transform(self.X), columns=self.columns)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def evaluate(self):\n results = dict()\n for metric in self.metrics:\n print('Evaluating clustering with metric %s' % metric)\n if metric in LABEL_METRICS.keys():\n results[metric] = LABEL_METRICS[metric](self.X, self.model.labels_)\n results['adjusted_rand_score'] = SCORE_METRICS['adjusted_rand_score'](self.Y[:, 0], self.model.labels_)\n self.results = results\n return results", "def _eval_classifier(self):\n\n y_pred_baseline = self.df_baseline[self.score_column]\n y_pred_sample = self.df_sample[self.score_column]\n\n y_label_baseline = self.df_baseline[self.label_column]\n y_label_sample = self.df_sample[self.label_column]\n\n precision_baseline = precision_score(y_label_baseline, y_pred_baseline)\n recall_baseline = recall_score(y_label_baseline, y_pred_baseline)\n acc_baseline = accuracy_score(y_label_baseline, y_pred_baseline)\n f1_baseline = f1_score(y_label_baseline, y_pred_baseline)\n try:\n auc_baseline = roc_auc_score(y_label_baseline, y_pred_baseline)\n except ValueError:\n auc_baseline = \"NA\"\n\n precision_sample = precision_score(y_label_sample, y_pred_sample)\n recall_sample = recall_score(y_label_sample, y_pred_sample)\n acc_sample = accuracy_score(y_label_sample, y_pred_sample)\n f1_sample = f1_score(y_label_sample, y_pred_sample)\n try:\n auc_sample = roc_auc_score(y_label_sample, y_pred_sample)\n except ValueError:\n auc_sample = \"NA\"\n\n metrics_df = pd.DataFrame(\n {\n \"Accuracy\": [acc_baseline, acc_sample],\n \"Precision\": [precision_baseline, precision_sample],\n \"Recall\": [recall_baseline, recall_sample],\n \"F1\": [f1_baseline, f1_sample],\n \"AUC\": [auc_baseline, auc_sample],\n },\n index=[\"baseline\", \"sample\"],\n )\n\n self.performance_comparison = metrics_df", "def __init__(self, rank=10, clusters=1, iterations=3, metric='euclidean'):\n\n sk_kmeans.__init__(self, n_clusters=clusters, max_iter=iterations)\n # Cluster ranks is a list of lists of knn sorted elements for each cluster w.r.t. 
the cluster mean\n self.rank = rank\n self.metric = metric", "def evaluation(X_selected, X_test, n_clusters, y):\n k_means = KMeans(n_clusters=n_clusters, init='k-means++', n_init=10, max_iter=300,\n tol=0.0001, precompute_distances=True, verbose=0,\n random_state=None, copy_x=True, n_jobs=1)\n\n k_means.fit(X_selected)\n y_predict = k_means.predict(X_test)\n\n # calculate NMI\n nmi = normalized_mutual_info_score(y, y_predict, average_method='arithmetic')\n\n sil = silhouette_score(X_test, y_predict, metric=\"euclidean\")\n db_score = davies_bouldin_score(X_test, y_predict)\n ch_score = calinski_harabasz_score(X_test, y_predict)\n purity = calcolaPurity(y, y_predict)\n\n return nmi, sil, db_score, ch_score, purity", "def final_evaluation(self):\n print(\"\\n=== Assign final labels ===\")\n\n # skip if there is only one cluster\n n_cluster = len(set(self.label[self.label != -1]))\n if n_cluster < 2:\n print(f\"Skip final evaluation because only {n_cluster} cluster label exist.\")\n # name all cluster as c0\n self.label = np.zeros(self.label.size, dtype=int)\n self.cv_predicted_label = [f\"c{label}\" for label in self.label]\n self.label_proba = np.ones(self.label.size, dtype=int)\n self.final_accuracy = 1\n else:\n # predict outliers\n outlier_x = self.X[self.label == -1]\n outlier_idx = np.where(self.label == -1)[0]\n if len(outlier_idx) != 0:\n outlier_predict = pd.Series(self.supervise_model.predict(outlier_x), index=outlier_idx)\n for cell, pred_label in outlier_predict.items():\n self.label[cell] = pred_label\n print(\n \"Assigned all the multi-leiden clustering outliers into clusters \"\n \"using the prediction model from final clustering version.\"\n )\n\n # final evaluation of non-outliers using cross val predict\n final_predict_proba = cross_val_predict(\n self.supervise_model,\n self.X,\n y=self.label,\n method=\"predict_proba\",\n n_jobs=self.n_jobs,\n verbose=0,\n cv=10,\n )\n final_predict = pd.Series(np.argmax(final_predict_proba, axis=1))\n final_cell_proba = pd.Series(np.max(final_predict_proba, axis=1))\n final_acc = balanced_accuracy_score(self.label, final_predict.values)\n print(f\"Final ten-fold CV Accuracy on all the cells: {final_acc:.3f}\")\n self.cv_predicted_label = [f\"c{label}\" for label in final_predict]\n self.label_proba = final_cell_proba.values\n self.final_accuracy = final_acc\n\n self.label = [f\"c{label}\" for label in self.label]\n return", "def run_evaluation(self, n_runs=1, n_points=1000, n_iterations=1, min_n_components=2, max_n_components=25,\n\t\t\t\t\t n_splits=3, save_data=False, file_label='',n_microstates=None, all_methods=True,\n\t\t\t\t\t assign_transition_points=True):\n\n\t\tif self.presampled_data is not None:\n\t\t\tsampled_data = self.presampled_data[0]\n\t\t\ttrue_clustering = self.presampled_data[1]\n\t\t\tn_runs = sampled_data.shape[0]\n\n\t\tself.cluster_score_ami_kmeans_ = np.zeros(n_runs)\n\t\tself.cluster_score_ami_AW_ = np.zeros(n_runs)\n\t\tself.cluster_score_ami_spectral_ = np.zeros(n_runs)\n\t\tself.cluster_score_ami_density_peaks_ = np.zeros(n_runs)\n\t\tself.cluster_score_ami_GMM_ = np.zeros(n_runs)\n\t\tself.cluster_score_ami_GMM_FE_min_ = np.zeros(n_runs)\n\n\t\tself.cluster_score_fm_kmeans_ = np.zeros(n_runs)\n\t\tself.cluster_score_fm_AW_ = np.zeros(n_runs)\n\t\tself.cluster_score_fm_spectral_ = np.zeros(n_runs)\n\t\tself.cluster_score_fm_density_peaks_ = np.zeros(n_runs)\n\t\tself.cluster_score_fm_GMM_ = np.zeros(n_runs)\n\t\tself.cluster_score_fm_GMM_FE_min_ = np.zeros(n_runs)\n\n\t\tself.cluster_score_vm_kmeans_ = 
np.zeros(n_runs)\n\t\tself.cluster_score_vm_AW_ = np.zeros(n_runs)\n\t\tself.cluster_score_vm_spectral_ = np.zeros(n_runs)\n\t\tself.cluster_score_vm_density_peaks_ = np.zeros(n_runs)\n\t\tself.cluster_score_vm_GMM_ = np.zeros(n_runs)\n\t\tself.cluster_score_vm_GMM_FE_min_ = np.zeros(n_runs)\n\n\t\tdata = self.toy_model_.sample(3)\n\n\t\t# Create free energy estimators\n\t\tgmm_FE = GMM_FE.FreeEnergyClustering(data, min_n_components=min_n_components, max_n_components=max_n_components,\n\t\t\t\t\t\t\t\t\t x_lims=self.x_lims_, n_grids=self.n_grids_, stack_landscapes=False,\n\t\t\t\t\t\t\t\t\t n_splits=n_splits, n_iterations=n_iterations,convergence_tol=self.convergence_tol_,\n\t\t\t\t\t\t\t\t\t verbose=self.verbose_)\n\n\t\tkm = kmc.KMeansCluster(min_n_components, max_n_components)\n\t\taw = awc.AWCluster(min_n_components, max_n_components)\n\t\tspectral = sc.SpectralCluster(min_n_components, max_n_components)\n\n\t\tall_data = []\n\t\tfor i_run in range(n_runs):\n\t\t\tprint(\"Run: \"+str(i_run+1)+'/'+str(n_runs))\n\n\t\t\tif self.presampled_data is None:\n\t\t\t\t# Sample data\n\t\t\t\tdata = self.toy_model_.sample(n_points)\n\t\t\telse:\n\t\t\t\tdata = sampled_data[i_run]\n\t\t\t\n\t\t\tall_data.append(data)\n\n\t\t\tprint('Shape data: ' + str(data.shape))\n\n\t\t\t# Set data in model and estimate GMM density\n\t\t\tgmm_FE.data_ = data\n\t\t\tcoords, est_FE_landsc, FE_points = gmm_FE.landscape()\n\n\t\t\t# Get true cluster labels\n\t\t\tif self.presampled_data is None:\n\t\t\t\tif hasattr(self.toy_model_, \"assign_cluster_labels\"):\n\t\t\t\t\tself.true_labels_ = self.toy_model_.assign_cluster_labels(data)\n\t\t\t\telse:\n\t\t\t\t\tprint('Setting true labels.')\n\t\t\t\t\tself.true_labels_, _ = self.true_FE_.cluster(data, np.zeros(data.shape[0]))\n\t\t\telse:\n\t\t\t\tself.true_labels_ = true_clustering[i_run]\n\t\t\t\n\t\t\t# Cluster data with different methods\n\t\t\tif n_microstates is None:\n\t\t\t\tself.FE_min_labels, _ = gmm_FE.cluster(data, FE_points, assign_transition_points=assign_transition_points)\n\t\t\telse:\n\t\t\t\tkmea = KMeans(n_clusters=n_microstates).fit(data[::2])\n\t\t\t\tmicrostate_centers = kmea.cluster_centers_\n\t\t\t\tself.FE_min_labels, _ = gmm_FE.cluster(microstate_centers, FE_points, data, assign_transition_points=assign_transition_points, unravel_grid=False)\n\n\t\t\tif all_methods:\n\t\t\t\tself.km_labels = km.cluster(data)\n\t\t\t\tself.aw_labels = aw.cluster(data)\n\t\t\t\tself.spectral_labels = spectral.cluster(data)\n\n\t\t\t# Score clustering using different scoring metrics\n\t\t\t# V-measure score\n\t\t\tself.cluster_score_vm_GMM_FE_min_[i_run] = self._score_clustering(self.FE_min_labels,'vm')\n\t\t\tprint(self.cluster_score_vm_GMM_FE_min_[i_run])\n\t\t\tif all_methods:\n\t\t\t\tself.cluster_score_vm_GMM_[i_run] = self._score_clustering(gmm_FE.density_est_.predict(data),'vm')\n\t\t\t\tself.cluster_score_vm_kmeans_[i_run] = self._score_clustering(self.km_labels,'vm')\n\t\t\t\tself.cluster_score_vm_AW_[i_run] = self._score_clustering(self.aw_labels,'vm')\n\t\t\t\tself.cluster_score_vm_spectral_[i_run] = self._score_clustering(self.spectral_labels,'vm')\n\n\t\t\t\t# Adjusted MI\n\t\t\t\tself.cluster_score_ami_GMM_FE_min_[i_run] = self._score_clustering(self.FE_min_labels,'ami')\n\t\t\t\tself.cluster_score_ami_GMM_[i_run] = self._score_clustering(gmm_FE.density_est_.predict(data),'ami')\n\t\t\t\tself.cluster_score_ami_kmeans_[i_run] = self._score_clustering(self.km_labels,'ami')\n\t\t\t\tself.cluster_score_ami_AW_[i_run] = 
self._score_clustering(self.aw_labels,'ami')\n\t\t\t\tself.cluster_score_ami_spectral_[i_run] = self._score_clustering(self.spectral_labels,'ami')\n\n\t\t\t\t# Fowlkes Mallows\n\t\t\t\tself.cluster_score_fm_GMM_FE_min_[i_run] = self._score_clustering(self.FE_min_labels,'fm')\n\t\t\t\tself.cluster_score_fm_GMM_[i_run] = self._score_clustering(gmm_FE.density_est_.predict(data),'fm')\n\t\t\t\tself.cluster_score_fm_kmeans_[i_run] = self._score_clustering(self.km_labels,'fm')\n\t\t\t\tself.cluster_score_fm_AW_[i_run] = self._score_clustering(self.aw_labels,'fm')\n\t\t\t\tself.cluster_score_fm_spectral_[i_run] = self._score_clustering(self.spectral_labels,'fm')\n\t\t\n\t\tif save_data:\n\t\t\tif self.presampled_data is None:\n\t\t\t\tnp.save('data_out/sampled_data_'+self.toy_model_.name+file_label+'.npy',all_data)\n\n\t\t\tif False:\n\t\t\t\tnp.save('data_out/cluster_score_fm_FE_min_'+self.toy_model_.name+file_label+'.npy',self.cluster_score_fm_GMM_FE_min_)\n\t\t\t\tnp.save('data_out/cluster_score_fm_GMM_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_fm_GMM_)\n\t\t\t\tnp.save('data_out/cluster_score_fm_kmeans_' + self.toy_model_.name +file_label +'.npy', self.cluster_score_fm_kmeans_)\n\t\t\t\tnp.save('data_out/cluster_score_fm_AW_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_fm_AW_)\n\t\t\t\tnp.save('data_out/cluster_score_fm_spectral_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_fm_spectral_)\n\n\t\t\t\tnp.save('data_out/cluster_score_ami_FE_min_'+self.toy_model_.name+file_label+'.npy',self.cluster_score_ami_GMM_FE_min_)\n\t\t\t\tnp.save('data_out/cluster_score_ami_GMM_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_ami_GMM_)\n\t\t\t\tnp.save('data_out/cluster_score_ami_kmeans_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_ami_kmeans_)\n\t\t\t\tnp.save('data_out/cluster_score_ami_AW_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_ami_AW_)\n\t\t\t\tnp.save('data_out/cluster_score_ami_spectral_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_ami_spectral_)\n\n\t\t\tnp.save('data_out/cluster_score_vm_FE_min_'+self.toy_model_.name+file_label+'.npy',self.cluster_score_vm_GMM_FE_min_)\n\t\t\tif all_methods:\n\t\t\t\tnp.save('data_out/cluster_score_vm_GMM_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_vm_GMM_)\n\t\t\t\tnp.save('data_out/cluster_score_vm_kmeans_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_vm_kmeans_)\n\t\t\t\tnp.save('data_out/cluster_score_vm_AW_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_vm_AW_)\n\t\t\t\tnp.save('data_out/cluster_score_vm_spectral_' + self.toy_model_.name + file_label+'.npy', self.cluster_score_vm_spectral_)\n\t\treturn", "def cluster(self,method=\"kmeans\",properties=None,k=3):\n try :\n from sklearn.cluster import KMeans, Ward\n from sklearn import __version__\n except :\n logger.warning(\"install scikits-learning package\")\n return\n X = [] #List of feature vector of each blob\n if not properties:\n properties = ['color','shape','position']\n if k > len(self):\n logger.warning(\"Number of clusters cannot be greater then the number of blobs in the featureset\")\n return\n for i in self:\n featureVector = []\n if 'color' in properties:\n featureVector.extend(i.mAvgColor)\n if 'shape' in properties:\n featureVector.extend(i.mHu)\n if 'position' in properties:\n featureVector.extend(i.extents())\n if not featureVector :\n logger.warning(\"properties parameter is not specified 
properly\")\n return\n X.append(featureVector)\n\n if method == \"kmeans\":\n \n # Ignore minor version numbers.\n sklearn_version = re.search(r'\\d+\\.\\d+', __version__).group()\n \n if (float(sklearn_version) > 0.11):\n k_means = KMeans(init='random', n_clusters=k, n_init=10).fit(X)\n else:\n k_means = KMeans(init='random', k=k, n_init=10).fit(X)\n KClusters = [ FeatureSet([]) for i in range(k)]\n for i in range(len(self)):\n KClusters[k_means.labels_[i]].append(self[i])\n return KClusters\n\n if method == \"hierarchical\":\n ward = Ward(n_clusters=int(sqrt(len(self)))).fit(X) #n_clusters = sqrt(n)\n WClusters = [ FeatureSet([]) for i in range(int(sqrt(len(self))))]\n for i in range(len(self)):\n WClusters[ward.labels_[i]].append(self[i])\n return WClusters", "def main():\n # Read in trainingSet and testSet as a DataFrame\n trainingOriginal = pd.read_csv(\n filepath_or_buffer=\"~/Desktop/KNN Implementation/adult.train.5fold.csv\")\n testOriginal = pd.read_csv(filepath_or_buffer=\"~/Desktop/KNN Implementation/adult.test.csv\")\n\n # Select only the numeric data\n training = pd.DataFrame(trainingOriginal.select_dtypes(['number']))\n training = pd.concat([training.reset_index(drop=True),\n trainingOriginal['earns'].reset_index(drop=True)], axis=1)\n\n # Select only the numeric data\n test = pd.DataFrame(testOriginal.select_dtypes(['number']))\n test = pd.concat([test.reset_index(drop=True),\n testOriginal['earns'].reset_index(drop=True)], axis=1)\n\n # Normalize the columns for training and test\n # print training['age'].min()\n # print training['age'].max()\n # print training.head()\n\n # Run max-min normalization on numerical columns for testing and training data\n for i in range(6):\n training.iloc[:, i] = (training.iloc[:, i]- training.iloc[:, i].min())/(training.iloc[:, i].max() - training.iloc[:, i].min())\n test.iloc[:, i] = (test.iloc[:, i]- test.iloc[:, i].min())/(test.iloc[:, i].max() - test.iloc[:, i].min())\n\n # Convert the 'earns' column to boolean as follows\n training['earns'] = training['earns'] == '>50K'\n test['earns'] = test['earns'] == ' >50K'\n\n # Group the training set by the fold attribute as given by the dataset\n trainingForFinal = training\n training = training.groupby('fold')\n\n # Since we want to consider odd k-values from 1 to 39, construct a list with these values\n kList = []\n for i in range(40):\n if i % 2 == 1:\n kList.append(i)\n\n # Empty dictionary to hold performance of each k-values and its accuracy\n performance = {}\n\n # Compute the performance for each k-value\n for k in kList:\n performance = crossValidation(training, k, performance)\n\n # Sort the performance dictionary by its accuracy (value)\n performance = sorted(performance.items(), key=operator.itemgetter(1), reverse=True)\n\n # Open file to write results\n file = open('grid.results.txt', 'w')\n # Write the results to file\n file.write(\"K | Accuracy\\n\")\n for item in performance:\n if item[0] < 10:\n file.write(str(item[0]) + ' | ' + str(item[1]) + '\\n')\n else:\n file.write(str(item[0]) + ' | ' + str(item[1]) + '\\n')\n # Close file\n file.close()\n\n # The best K is the one at the top of the list after the sorting\n bestK = performance[0][0]\n\n print 'Running Test Set with K = ' + str(bestK)\n\n applyModel(test,trainingForFinal,bestK)", "def _eval_clustering(self, gen_reviews, clusters, embedding_model, clustering):\n result = []\n preds = self.predict_gen(gen_reviews, embedding_model, clustering)\n\n acc = accuracy_score(np.array(clusters), np.array(preds))\n conf = 
confusion_matrix(np.array(clusters), np.array(preds))\n\n return acc, conf", "def dataClust(resAttrDF, infCol = 'Dollars', resName = None):\n \n if resName is None:\n raise Exception('**** RESTAURANT NAME WAS NOT PROVIDED ****')\n \n ## COPY AND PREPROCESS RESTAURANT ATTRIBUTE DATA\n print(f'\\n**** PREPROCESSING AND CLUSTERING DATA ACCORDING TO...{infCol.upper()} COLUMN ****')\n\n k_clust = resAttrDF.copy()\n k_clust = k_clust.reset_index(drop = True)\n \n labelEncoder = LabelEncoder()\n k_clust['Name'] = labelEncoder.fit_transform(k_clust['Name'])\n for col in k_clust.columns:\n if k_clust[col].dtypes == 'object':\n k_clust[col] = pd.to_numeric(k_clust[col])\n\n kprot_data = k_clust.copy()\n for c in k_clust.select_dtypes(exclude='object').columns:\n pt = PowerTransformer()\n kprot_data[c] = pt.fit_transform(np.array(kprot_data[c]).reshape(-1, 1))\n\n categorical_columns = [0] ## MAKE SURE TO SPECIFY CURRECT INDICES\n\n ## ACTUAL CLUSTERING\n if infCol != 'Dollars':\n kproto = KPrototypes(n_clusters= len(k_clust[infCol].unique()), init='Cao', n_jobs = 4)\n clusters = kproto.fit_predict(kprot_data, categorical=categorical_columns)\n else:\n kproto = KPrototypes(n_clusters= len(k_clust['Dollars'].unique()), init='Cao', n_jobs = 4)\n clusters = kproto.fit_predict(kprot_data, categorical=categorical_columns) \n\n ## PRINT COUNT OF EACH CLUSTER GROUP\n print('The count for each cluster group is printed below')\n pd.Series(clusters).value_counts()\n \n ## EVALUATE CLUSTER ACCURACY WITH LGBMCLASSIFIER\n clf_kp = lgb.LGBMClassifier(colsample_by_tree=0.8, random_state=1)\n cv_scores_kp = cross_val_score(clf_kp, k_clust, clusters, scoring='f1_weighted')\n print(f'CV F1 score for K-Prototypes clusters is {np.mean(cv_scores_kp)}')\n\n ## PLOT INFLUENTIAL COLOUMNS\n clf_kp.fit(k_clust, clusters)\n explainer_kp = shap.TreeExplainer(clf_kp)\n shap_values_kp = explainer_kp.shap_values(k_clust)\n shap.summary_plot(shap_values_kp, k_clust, plot_type=\"bar\", plot_size=(15, 10))\n\n ## ADD CLUSTERS TO ORIGINAL DATAFRAME AND INVERSE LABEL ENCODE RESTAURANT NAMES\n k_clust['Cluster'] = clusters\n k_clust['Name'] = labelEncoder.inverse_transform(k_clust['Name'])\n\n ## FILTER RESTAURNAT CLUSTER OF CHOICE\n clusterVal = clusters[list(k_clust['Name']).index(resName)]\n k_clust = k_clust[k_clust['Cluster'] == clusterVal]\n k_clust = k_clust.reset_index(drop = True)\n k_clust = k_clust[['Name', 'ZipCode', 'Dollars', 'Photos']]\n\n print('**** CLUSTERING COMPLETED AND SAVING CLUSTER DATAFRAME LOCALLY ****\\n')\n resFileName = resName.replace(' ', '_')\n fileName = f'{resFileName.upper()}_CLUSTER_DATA.csv'\n k_clust.to_csv(fileName)\n\n return k_clust", "def fit(self):\n self.cluseter_agglomerative(n_clusters=20, linkage='average', iterate=5)\n self.sub_clustering(n_clusters=3, index_cluster=[79], linkage='complete')\n self.merge_clusters([[0,9,53],[1,83],[46,35,67],[88,23],[6,68]])\n self.merge_clusters([[6,33,52],[17,14]])\n self.sub_clustering(n_clusters=2, index_cluster=[0], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[2], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[85], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[14], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[16], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[22], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[24], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[26], 
linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[28], linkage='ward')\n self.merge_clusters([[6,98,99]])\n self.merge_clusters([[35,80]])\n self.sub_clustering(n_clusters=4, index_cluster=[35], linkage='complete')\n self.merge_clusters([[76,98]])\n self.sub_clustering(n_clusters=3, index_cluster=[35], linkage='complete')\n self.merge_clusters([[39,42]])\n self.sub_clustering(n_clusters=3, index_cluster=[47], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='average')\n self.merge_clusters([[70,101]])\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[61], linkage='ward')\n self.merge_clusters()\n return", "def test_determine_k(self):\n test_dir_name = os.path.dirname(__file__)\n feat_array_fn = os.path.join(\n test_dir_name, \"data\", \"four_clusters.csv\")\n df = pd.read_csv(feat_array_fn)\n feat_array = df[[\"x\", \"y\"]].values\n\n clusterer = Clusterer(feat_array_fn, \"/dev/null\", [])\n best_k = clusterer._determine_k(feat_array, 9)\n\n self.assertEqual(best_k, 4)\n\n feat_array_fn = os.path.join(\n test_dir_name, \"data\", \"iris.csv\")\n df = pd.read_csv(feat_array_fn)\n feat_array = df[[\n \"Sepal.Length\", \"Sepal.Width\", \"Petal.Length\",\n \"Petal.Width\"]].values\n\n clusterer = Clusterer(feat_array_fn, \"/dev/null\", [])\n best_k = clusterer._determine_k(feat_array, 9)\n\n self.assertEqual(best_k, 2)", "def evaluate(clf, dataset, feature_list, features, labels, num_iter, params):\n\n features_train, features_test, labels_train, labels_test = \\\n train_test_split(features, labels, test_size=0.3, random_state=42)\n\n\n\n precision_values = []\n recall_values = []\n accuracy_values = []\n print clf\n for i in xrange(0, num_iter):\n #print params\n clf = GridSearchCV(clf, params)\n clf.fit(features_train, labels_train)\n print '*****************************'\n print clf.best_estimator_\n print clf.best_params_\n\n clf = clf.best_estimator_\n #test_classifier(clf, dataset, feature_list)\n pred = clf.predict(features_test)\n precision_values.append(precision_score(labels_test, pred))\n recall_values.append(recall_score(labels_test, pred))\n accuracy_values.append(accuracy_score(labels_test, pred))\n print 'Recall score: ', mean(recall_values)\n print 'Precision score: ', mean(precision_values)\n print 'Accuracy score: ' , mean(accuracy_values)", "def supervise_learning(self):\n if self._multi_leiden_clusters is None:\n raise ValueError(\n \"Run multi_leiden_clustering first to get a \" \"clustering assignment before run supervise_learning.\"\n )\n\n n_cluster = np.unique(self._multi_leiden_clusters[self._multi_leiden_clusters != -1]).size\n if n_cluster == 1:\n print(\"There is only one cluster except for outliers, can not train supervise model on that.\")\n self.label = np.zeros(self.n_obs, dtype=int)\n return\n print(\"\\n=== Start supervise model training and cluster merging ===\")\n\n x = self.X\n cur_y = self._multi_leiden_clusters.copy()\n score = None\n step = 0.1\n\n if self.supervise_model is None:\n # create default model if no model provided\n clf = self._create_model(n_estimators=500)\n else:\n clf = self.supervise_model\n for cur_iter in range(1, self.max_iter + 1):\n print(f\"\\n=== iteration {cur_iter} ===\")\n n_labels = np.unique(cur_y[cur_y != -1]).size\n print(f\"{n_labels} non-outlier labels\")\n if n_labels < 2:\n print(f\"Stop iteration because only {n_labels} cluster remain.\")\n break\n\n x_train, y_train, x_test, y_test = 
_split_train_test_per_group(\n x=x,\n y=cur_y,\n frac=self.train_frac,\n max_train=self.train_max_n,\n random_state=self.random_state + cur_iter,\n # every time train-test split got a different random state\n )\n (\n clf,\n score,\n cluster_map,\n cmat,\n r1_cmat,\n r2_cmat,\n ) = single_supervise_evaluation(\n clf,\n x_train,\n y_train,\n x_test,\n y_test,\n r1_norm_step=step,\n r2_norm_step=step,\n )\n step = min(0.2, max(0.05, 2 * (self.target_accuracy - score)))\n\n # save step data for plotting\n self.step_data[cur_iter] = [\n cur_y,\n cmat,\n r1_cmat,\n r2_cmat,\n cluster_map,\n score,\n step,\n ]\n\n if score > self.target_accuracy:\n print(\n f\"Stop iteration because current accuracy {score:.3f}\"\n f\" > target accuracy {self.target_accuracy:.3f}.\"\n )\n break\n\n # judge results\n if len(cluster_map) > 0:\n print(f\"Merging {len(cluster_map)} clusters.\")\n cur_y = pd.Series(cur_y).apply(lambda i: cluster_map[i] if i in cluster_map else i)\n # renumber labels from large to small\n ordered_map = {c: i for i, c in enumerate(cur_y[cur_y != -1].value_counts().index)}\n cur_y = pd.Series(cur_y).apply(lambda i: ordered_map[i] if i in ordered_map else i).values\n else:\n print(\"Stop iteration because there is no cluster to merge\")\n break\n else:\n print(\"Stop iteration because reaching maximum iteration.\")\n self._label_with_leiden_outliers = cur_y\n self.label = cur_y\n self.supervise_model = clf\n self.final_accuracy = score\n return", "def create_cluster(df,validate, test, X, k, name):\n \n scaler = StandardScaler(copy=True).fit(df[X])\n X_scaled = pd.DataFrame(scaler.transform(df[X]), columns=df[X].columns.values).set_index([df[X].index.values])\n kmeans = KMeans(n_clusters = k, random_state = 42)\n kmeans.fit(X_scaled)\n kmeans.predict(X_scaled)\n df[name] = kmeans.predict(X_scaled)\n df[name] = 'cluster_' + df[name].astype(str)\n \n v_scaled = pd.DataFrame(scaler.transform(validate[X]), columns=validate[X].columns.values).set_index([validate[X].index.values])\n validate[name] = kmeans.predict(v_scaled)\n validate[name] = 'cluster_' + validate[name].astype(str)\n \n t_scaled = pd.DataFrame(scaler.transform(test[X]), columns=test[X].columns.values).set_index([test[X].index.values])\n test[name] = kmeans.predict(t_scaled)\n test[name] = 'cluster_' + test[name].astype(str)\n \n centroids = pd.DataFrame(scaler.inverse_transform(kmeans.cluster_centers_), columns=X_scaled.columns)\n return df, X_scaled, scaler, kmeans, centroids", "def knn_classification(x_test, df_training, attrib_column, k):\n return majority_vote(k_nearest_neighbors(x_test, df_training,k),df,attrib_column)", "def classify():\n yes_dataset = df[df[\"_class\"] == 1] # 470588\n no_dataset = df[df[\"_class\"] == 0] # 1971\n\n parameter_analysis = list()\n for criterion in np.arange(0.05, 0.91, 0.05):\n print(\"doing experiment at criterion = %s ...\" % criterion)\n rate_list = list()\n for i in range(10):\n # shuffle yes_dataset and no_dataset, so we can randomly choose 90% yes_dataset\n # + 90% no_dataset as train dataset, 10% yes_dataset + 10% no_dataset as test dataset\n yes_index = yes_dataset.index.tolist()\n random.shuffle(yes_index)\n no_index = no_dataset.index.tolist()\n random.shuffle(no_index)\n \n # concatenate 90%yes + 90%no, 10%yes + 10%no\n train = pd.concat([\n yes_dataset.loc[yes_index[:1774], :],\n no_dataset.loc[no_index[:423530], :]\n ])\n test = pd.concat([\n yes_dataset.loc[yes_index[1774:], :],\n no_dataset.loc[no_index[423530:], :]\n ]) \n \n # split data and label\n train_data, train_label 
= (train[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n train[\"_class\"])\n test_data, test_label = (test[[\"Revenue.Code\", \n \"Service.Code\", \n \"Procedure.Code\", \n \"Diagnosis.Code\", \n \"Subscriber.Index\"]], \n test[\"_class\"])\n \n # apply classifier\n clf = GaussianNB()\n clf.fit(train_data, train_label)\n probability = clf.predict_proba(test_data).T[1]\n \n result = pd.DataFrame()\n result[\"_class\"] = test_label\n result[\"_predict\"] = probability >= criterion\n \n result_yes = result[result[\"_class\"] == 1]\n yes_yes_rate = sum(result_yes[\"_class\"] == result_yes[\"_predict\"])/len(result_yes[\"_predict\"])\n \n result_no = result[result[\"_class\"] == 0]\n no_no_rate = sum(result_no[\"_class\"] == result_no[\"_predict\"])/len(result_no[\"_predict\"])\n \n rate_list.append((yes_yes_rate, no_no_rate))\n\n rate_list = pd.DataFrame(rate_list)\n yes_yes_rate, no_no_rate = rate_list.mean()[0], rate_list.mean()[1]\n parameter_analysis.append((criterion, yes_yes_rate, no_no_rate))\n \n # save data to excel spreadsheet\n parameter_analysis = pd.DataFrame(parameter_analysis, columns=[\"criterion\", \"yes_yes_rate\", \"no_no_rate\"])\n writer = pd.ExcelWriter(\"parameter_analysis.xlsx\")\n parameter_analysis.to_excel(writer, \"parameter_analysis\", index=False)\n writer.save()", "def __init__(self,\n n_splits_outer=5,\n gridsearch='half',\n n_components=3,\n classifier='svm',\n cv=5,\n min_resources='smallest',\n factor=3,\n score_func=metrics.f1_score,\n average='weighted',\n random_state=None,\n n_jobs=-1,\n **kwargs,\n ):\n\n self.outer_cv = \\\n StratifiedKFold(n_splits=n_splits_outer,\n shuffle=True, # each fold is independent\n random_state=random_state)\n\n super().__init__(gridsearch,\n n_components,\n classifier,\n cv,\n min_resources,\n factor,\n score_func,\n average,\n random_state,\n n_jobs,\n **kwargs,\n )", "def evaluate(self, X, y, hypes={}, n_splits=5, shuffle=True, standardize=True, groups=None):\n \n ### SET HYPERPARAMETERS ###\n model = clone(self.estimator) # Gotta do this otherwise funky things happen\n model.set_params(**hypes)\n \n ### INITIALIZE SCORING DATAFRAME ###\n fractions = ['train', 'val']\n scoring_metrics = ['mae', 'mape', 'medape', 'pearson', 'spearman']\n score_columns = pd.MultiIndex.from_product([fractions, scoring_metrics]) # This sets up a heirarchical index for the results dataframe\n score = pd.DataFrame(columns=score_columns)\n\n ### SET UP X-VALIDATION ###\n \n if groups is not None:\n cv = model_selection.LeaveOneGroupOut()\n splitter = enumerate(cv.split(X,y,groups))\n else:\n cv = model_selection.KFold(n_splits=n_splits, shuffle=shuffle)\n splitter = enumerate(cv.split(X,y))\n\n ### RUN CV AND SCORE MODEL ###\n last_splits = [] # Keep track of split indices for forensics\n for idx, (train, val) in splitter:\n\n X_train = X.iloc[train,:]; y_train = y.iloc[train]\n X_val = X.iloc[val,:]; y_val = y.iloc[val]\n \n if standardize:\n std = preprocessing.StandardScaler()\n std.fit(X_train)\n X_train, X_val = std.transform(X_train), std.transform(X_val)\n\n # if idx==0:\n # for v in ['X_train','y_train','X_val','y_val']:\n # print('{} shape: {}'.format(v, eval('{}.shape'.format(v))))\n\n ### INSTANTIATE AND FIT MODEL ###\n last_splits.append((train, val))\n model.fit(X_train, y_train)\n\n for frac in ['train','val']:\n \n # y_true will either be y_train or y_val depending on what 'frac' is. 
Kind of hacky.\n y_true = eval('y_'+frac)\n y_pred = model.predict(eval('X_'+frac))\n \n # Calculate MAE\n score.loc[idx, (frac,'mae')] = \\\n metrics.mean_absolute_error(y_true, y_pred)\n \n # Calculate MAPE\n score.loc[idx, (frac,'mape')] = \\\n mean_absolute_percentage_error(y_true, y_pred)\n \n # Calculate MedAPE\n score.loc[idx, (frac,'medape')] = \\\n median_absolute_percentage_error(y_true, y_pred)\n\n # Calculate pearson\n score.loc[idx, (frac,'pearson')] = \\\n stats.pearsonr(y_true, y_pred)[0]\n\n # Calculate spearman\n score.loc[idx, (frac,'spearman')] = \\\n stats.spearmanr(y_true, y_pred)[0]\n\n self.estimator = model\n self.last_scores = score\n self.last_hypes = hypes\n self.last_splits = last_splits\n\n return score", "def __init__(\n self,\n model=None,\n n_neighbors=25,\n metric=\"euclidean\",\n min_cluster_size=10,\n leiden_repeats=200,\n leiden_resolution=1,\n target_accuracy=0.95,\n consensus_rate=0.7,\n random_state=0,\n train_frac=0.5,\n train_max_n=500,\n max_iter=50,\n n_jobs=-1,\n ):\n # input metrics\n self.min_cluster_size = min_cluster_size\n self.consensus_rate = consensus_rate # this prevents merging gradient clusters\n self.leiden_repeats = leiden_repeats\n self.leiden_resolution = leiden_resolution\n self.random_state = random_state\n self.n_jobs = n_jobs\n self.n_neighbors = n_neighbors\n self.knn_metric = metric\n self.train_frac = train_frac\n self.train_max_n = train_max_n\n self.max_iter = max_iter\n self.n_obs, self.n_pcs = None, None\n self.X = None\n self._neighbors = None\n self.step_data = OrderedDict()\n self.target_accuracy = target_accuracy\n\n # multiple leiden clustering\n self.leiden_result_df = None\n self._multi_leiden_clusters = None\n\n # model training and outlier rescue\n self.supervise_model = model\n self._label_with_leiden_outliers = None\n self.label = None\n self.label_proba = None\n self.cv_predicted_label = None\n self.final_accuracy = None\n return", "def run(\n self,\n number_of_clusters=None,\n max_K=8,\n method_clustering=\"pam\",\n init_clustering=\"random\",\n max_iter_clustering=100,\n discart_value_JI=0.6,\n bootstraps_JI=100,\n bootstraps_p_value=100,\n n_jobs=1,\n verbose=1,\n ):\n\n if number_of_clusters is None:\n self.k = optimizer.optimizeK(\n self.distance_matrix,\n self.y.to_numpy(),\n self.model_type,\n max_K,\n method_clustering,\n init_clustering,\n max_iter_clustering,\n discart_value_JI,\n bootstraps_JI,\n self.random_state,\n n_jobs,\n verbose,\n )\n\n if self.k == 1:\n warnings.warn(\"No stable clusters were found!\")\n return\n\n print(f\"Optimal number of cluster is: {self.k}\")\n\n else:\n self.k = number_of_clusters\n print(f\"Use {self.k} as number of cluster\")\n\n self.cluster_labels = (\n kmedoids.KMedoids(\n n_clusters=self.k,\n method=method_clustering,\n init=init_clustering,\n metric=\"precomputed\",\n max_iter=max_iter_clustering,\n random_state=self.random_state,\n )\n .fit(self.distance_matrix)\n .labels_\n )\n\n (\n self._data_clustering_ranked,\n self.p_value_of_features,\n ) = stats.calculate_global_feature_importance(\n self.X, self.y, self.cluster_labels, self.model_type\n )\n self._p_value_of_features_per_cluster = (\n stats.calculate_local_feature_importance(\n self._data_clustering_ranked, bootstraps_p_value\n )\n )", "def __init__(self, conn, args, data, split_type, num_clusters):\n\n self.conn = conn\n self.args = args\n self.data = data\n self.split_type = split_type\n\n self.pca_model = None\n self.cluster_model = None\n self.algorithm = args['cluster_algorithm']\n\n # 
http://scikit-learn.org/stable/auto_examples/cluster/plot_cluster_comparison.html\n hdbsc = hdbscan.HDBSCAN(min_cluster_size=10)\n affinity_propagation = cluster.AffinityPropagation()\n ms = cluster.MeanShift(bin_seeding=True)\n spectral = cluster.SpectralClustering(n_clusters=num_clusters, \n eigen_solver='arpack',\n affinity=\"nearest_neighbors\", \n random_state=self.args['seed'])\n ward = cluster.AgglomerativeClustering(n_clusters=num_clusters, \n linkage='ward')\n birch = cluster.Birch(n_clusters=num_clusters)\n two_means = cluster.MiniBatchKMeans(n_clusters=num_clusters,\n random_state=self.args['seed'])\n average_linkage = cluster.AgglomerativeClustering(linkage=\"average\", \n n_clusters=num_clusters)\n hdbsc = hdbscan.HDBSCAN(min_cluster_size=10)\n kmeans = cluster.KMeans(n_clusters=num_clusters, random_state=self.args['seed'])\n dbscan = cluster.DBSCAN()\n \n self.clustering_algorithms = {\n 'MiniBatchKMeans': two_means,\n 'AffinityPropagation': affinity_propagation,\n 'MeanShift': ms,\n 'SpectralClustering': spectral,\n 'Ward': ward,\n 'AgglomerativeClustering': average_linkage,\n 'DBSCAN': dbscan,\n 'Birch': birch,\n 'HDBSCAN': hdbsc,\n 'KMeans': kmeans\n }", "def _fit(\n self,\n x,\n clusters=50,\n a=5,\n Niter=15,\n device=None,\n backend=None,\n approx=False,\n n=50,\n ):\n if type(clusters) != int:\n raise ValueError(\"Clusters must be an integer\")\n if clusters >= len(x):\n raise ValueError(\"Number of clusters must be less than length of dataset\")\n if type(a) != int:\n raise ValueError(\"Number of clusters to search over must be an integer\")\n if a > clusters:\n raise ValueError(\n \"Number of clusters to search over must be less than total number of clusters\"\n )\n if len(x.shape) != 2:\n raise ValueError(\"Input must be a 2D array\")\n if self.__normalise:\n x = x / self.tools.repeat(self.tools.norm(x, 2, -1), x.shape[1]).reshape(\n -1, x.shape[1]\n )\n\n # if we want to use the approximation in Kmeans, and our metric is angular, switch to full angular metric\n if approx and self.__metric == \"angular\":\n self.__update_metric(\"angular_full\")\n\n x = self.tools.contiguous(x)\n self.__device = device\n self.__backend = backend\n\n cl, c = self.tools.kmeans(\n x,\n self.__distance,\n clusters,\n Niter=Niter,\n device=self.__device,\n approx=approx,\n n=n,\n )\n\n self.__c = c\n cl = self.__assign(x)\n\n ncl = self.__k_argmin(c, c, k=a)\n self.__x_ranges, _, _ = cluster_ranges_centroids(x, cl)\n\n x, x_labels = self.__sort_clusters(x, cl, store_x=True)\n self.__x = x\n r = self.tools.repeat(self.tools.arange(clusters, device=self.__device), a)\n self.__keep = self.tools.to(\n self.tools.zeros([clusters, clusters], dtype=bool), self.__device\n )\n self.__keep[r, ncl.flatten()] = True\n\n return self", "def __init__(self,\n num_clusters,\n model_dir=None,\n initial_clusters=RANDOM_INIT,\n distance_metric=SQUARED_EUCLIDEAN_DISTANCE,\n random_seed=0,\n use_mini_batch=True,\n mini_batch_steps_per_iteration=1,\n kmeans_plus_plus_num_retries=2,\n relative_tolerance=None,\n config=None):\n params = {}\n params['num_clusters'] = num_clusters\n params['training_initial_clusters'] = initial_clusters\n params['distance_metric'] = distance_metric\n params['random_seed'] = random_seed\n params['use_mini_batch'] = use_mini_batch\n params['mini_batch_steps_per_iteration'] = mini_batch_steps_per_iteration\n params['kmeans_plus_plus_num_retries'] = kmeans_plus_plus_num_retries\n params['relative_tolerance'] = relative_tolerance\n super(KMeansClustering, self).__init__(\n 
model_fn=_kmeans_clustering_model_fn,\n params=params,\n model_dir=model_dir,\n config=config)", "def apply_evaluation(x, label_true, params, names, database_name):\n rows = []\n\n for i in range(0, len(names)):\n act_name = names[i]\n act_data = x[i]\n\n algorithm = KMeans(k=params['k'], seed=params['seed'], max_it=params['max_it'], tol=params['tol'])\n labels = algorithm.fit_predict(act_data)\n\n unsupervised = evaluate_unsupervised_internal(act_data, labels)\n supervised = evaluate_supervised_external(label_true, labels)\n\n row = {**dict(Names=act_name), **supervised, **unsupervised}\n rows.append(row)\n df_results = pd.DataFrame(rows)\n set_output(df_results, 'pca_analysis_'+database_name)", "def __existence_classification__(self,task_id,shape,aggregations):\n\n # aggregations = {}\n\n # raw_classifications and clustering_results have different hierarchy orderings- raw_classifications\n # is better for processing data and clustering_results is better for showing the end result\n # technically we only need to look at the data from clustering_results right now but its\n # hierarchy is really inefficient so use raw_classifications to help\n\n # each shape is done independently\n\n # set - so if multiple tools create the same shape - we only do that shape once\n # for shape in set(marking_tasks[task_id]):\n\n\n # pretentious name but basically whether each person who has seen a subject thinks it is a true\n # positive or not\n existence_classification = {\"param\":\"subject_id\"}\n\n global_cluster_index = 0\n # clusters_per_subject = []\n\n # look at the individual points in the cluster\n for subject_id in aggregations.keys():\n if subject_id == \"param\":\n continue\n\n # gold standard pts may not match up perfectly with the given clusters -\n # for example, we could have a gold penguin at 10,10 but the users' cluster\n # is centered at 10.1,9.8 - same penguin though\n # so as we go through the clusters, we need to see which ones match up more closely\n # with the gold standard\n # if subject_id in gold_standard_clustering[0]:\n # # closest cluster and distance\n # gold_to_cluster = {pt:(None,float(\"inf\")) for pt in gold_standard_clustering[0][subject_id]}\n # else:\n # gold_to_cluster = None\n\n\n # clusters_per_subject.append([])\n\n # # in either case probably an empty image\n # if subject_id not in clustering_results:\n # continue\n # if task_id not in clustering_results[subject_id]:\n # continue\n\n if (shape+ \" clusters\") not in aggregations[subject_id][task_id]:\n # if none of the relevant markings were made on this subject, skip it\n continue\n\n all_users = aggregations[subject_id][task_id][shape+ \" clusters\"][\"all_users\"]\n\n for local_cluster_index in aggregations[subject_id][task_id][shape+ \" clusters\"]:\n if local_cluster_index == \"all_users\":\n continue\n\n # extract the users who marked this cluster\n cluster = aggregations[subject_id][task_id][shape+ \" clusters\"][local_cluster_index]\n\n # todo - put this back when we support gold standard clustering\n # # is this user cluster close to any gold standard pt?\n # if subject_id in gold_standard_clustering[0]:\n # x,y = cluster[\"center\"]\n # for (gold_x,gold_y) in gold_to_cluster:\n # dist = math.sqrt((x-gold_x)**2+(y-gold_y)**2)\n # if dist < gold_to_cluster[(gold_x,gold_y)][1]:\n # gold_to_cluster[(gold_x,gold_y)] = local_cluster_index,dist\n #\n # # now repeat for negative gold standards\n # if subject_id in gold_standard_clustering[1]:\n # x,y = cluster[\"center\"]\n # min_dist = float(\"inf\")\n # 
closest= None\n # for x2,y2 in gold_standard_clustering[1][subject_id]:\n # dist = math.sqrt((x-x2)**2+(y-y2)**2)\n # if dist < min_dist:\n # min_dist = min(dist,min_dist)\n # closest = (x2,y2)\n # if min_dist == 0.:\n # assert (x,y) == closest\n # mapped_gold_standard[(subject_id,local_cluster_index)] = 0\n\n users = cluster[\"users\"]\n\n ballots = []\n\n # todo - the 15 hard coded value - might want to change that at some point\n for u in all_users:\n if u in users:\n ballots.append((u,1))\n else:\n ballots.append((u,0))\n\n existence_classification[(subject_id,local_cluster_index)] = ballots\n # clusters_per_subject[-1].append(global_cluster_index)\n # global_cluster_index += 1\n\n # # note we don't care about why a cluster corresponds to a gold standard pt - that is\n # # it could be really close to given gold standards - the point is that it is close\n # # to at least one of them\n # if gold_to_cluster is not None:\n # for (local_cluster_index,dist) in gold_to_cluster.values():\n # # arbitrary threshold but seems reasonable\n # if dist < 1:\n # mapped_gold_standard[(subject_id,local_cluster_index)] = 1\n\n existence_results = self.__task_aggregation__(existence_classification,task_id,{})#,mapped_gold_standard)\n assert isinstance(existence_results,dict)\n\n for subject_id,cluster_index in existence_results:\n new_results = existence_results[(subject_id,cluster_index)][task_id]\n # new_agg = {subject_id: {task_id: {shape + \" clusters\": {cluster_index: {\"existence\": new_results}}}}}\n # aggregations = self.__merge_results__(aggregations,new_agg)\n aggregations[subject_id][task_id][shape + \" clusters\"][cluster_index][\"existence\"] = new_results\n # if subject_id not in aggregations:\n # aggregations[subject_id] = {}\n # if task_id not in aggregations[subject_id]:\n # aggregations[subject_id][task_id] = {}\n # if (shape + \" clusters\") not in aggregations[subject_id][task_id]:\n # aggregations[subject_id][task_id][shape+ \" clusters\"] = {}\n # # this part is probably redundant\n # if cluster_index not in aggregations[subject_id][task_id][shape+ \" clusters\"]:\n # aggregations[subject_id][task_id][shape+ \" clusters\"][cluster_index] = {}\n #\n # aggregations[subject_id][task_id][shape+ \" clusters\"][cluster_index][\"existence\"] = existence_results[(subject_id,cluster_index)]\n\n return aggregations", "def evalute_subset(X_train, X_test, y_train, y_test):\r\n clf = KNeighborsClassifier(n_neighbors=3)\r\n clf.fit(X_train, y_train) \r\n y_pred = clf.predict(X_test)\r\n return accuracy_score(y_test, y_pred)", "def evaluation_k_means(X_selected, n_clusters, y, n_jobs = 1):\n k_means = KMeans(n_clusters=n_clusters, init='k-means++', n_init=10, max_iter=300,\n tol=0.0001, precompute_distances=True, verbose=0,\n random_state=None, copy_x=True, n_jobs=n_jobs)\n \n k_means.fit(X_selected)\n y_predict = k_means.labels_\n \n # calculate NMI\n nmi = normalized_mutual_info_score(y, y_predict)\n \n # calculate ACC\n y_permuted_predict = best_map(y, y_predict)\n acc = accuracy_score(y, y_permuted_predict)\n \n return nmi, acc", "def train(x_train, y_train, x_test, y_test):\n\n print(\" Nearest centroid : \", end='')\n run(x_train, y_train, x_test, y_test, NearestCentroid())\n print(\" k-NN classifier (k=3) : \", end='')\n run(x_train, y_train, x_test, y_test, KNeighborsClassifier(n_neighbors=3))\n print(\" k-NN classifier (k=7) : \", end='')\n run(x_train, y_train, x_test, y_test, KNeighborsClassifier(n_neighbors=7))\n print(\" Naive Bayes (Gaussian) : \", end='')\n run(x_train, y_train, 
x_test, y_test, GaussianNB())\n print(\" Random Forest (trees= 5) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=5))\n print(\" Random Forest (trees= 50) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=50))\n print(\" Random Forest (trees=500) : \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=500))\n print(\" Random Forest (trees=1000): \", end='')\n run(x_train, y_train, x_test, y_test, RandomForestClassifier(n_estimators=1000))\n print(\" LinearSVM (C=0.01) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=0.01))\n print(\" LinearSVM (C=0.1) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=0.1))\n print(\" LinearSVM (C=1.0) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=1.0))\n print(\" LinearSVM (C=10.0) : \", end='')\n run(x_train, y_train, x_test, y_test, LinearSVC(C=10.0))", "def cluster_by_partitioning(active_sites):\n cls, sc = k_means(active_sites)\n\n return cls" ]
[ "0.6347601", "0.6259447", "0.6242199", "0.619887", "0.6134004", "0.6122757", "0.6117709", "0.6106118", "0.61054987", "0.6089926", "0.60873955", "0.6071255", "0.60541445", "0.6051448", "0.60498863", "0.6030983", "0.60236627", "0.60101587", "0.5973187", "0.5970434", "0.5955364", "0.5954432", "0.59430915", "0.59396106", "0.59386337", "0.5863862", "0.5857013", "0.5841426", "0.5829467", "0.5824277" ]
0.7258767
0
Gives the number of observations in each cluster
def cluster_obs_count(self):
    return(self.merged_data.groupby(
        'labels').count().transpose().iloc[0, :])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_cluster_count(self) -> int:\n return len(self.get_all_cluster_ids())", "def cluster_count(self) -> int:\n cluster_count = max(1, round(16**3 * (self.vein.purity / 100.0) / self.cluster_size))\n return self.distribution.scale_cluster_count(cluster_count)", "def analysis_function_num_clusters(self,clustering):\n return len(clustering.clusters)", "def n_clusters(self):\n return len(self.clusters)", "def gal_count(clusters):\n sum = 0\n for x in clusters:\n sum += x.ngal\n return sum", "def n_clusters(self):\n return self.model.n_clusters", "def cluster_counter(self):\n return Counter(self.model.labels_.tolist())", "def get_cluster_sizes(self,sc):\n\n clusterSizes = []\n clusters = sc.labels\n for k in np.arange(clusters.max()):\n clusterSizes.append(len(np.where(clusters==k)[0])) \n \n return clusterSizes", "def cluster_size(result, var):\n df=calculate_cluster_size(result, var)\n df['cus']=df.index\n return df", "def get_number_of_clusters(df, use_pca, n_components):\n n_clusters = 10\n cluster_with_distances = []\n for i in range(n_clusters):\n pipe = _build_model(df, use_pca, n_components, use_kmeans=True, n_clusters=i + 1)\n cluster_with_distances.append(pipe.named_steps['kmeans'].inertia_)\n plt.figure(6, figsize=(12, 6))\n plt.plot(range(1, 11), cluster_with_distances, 'o')\n plt.plot(range(1, 11), cluster_with_distances, '-', alpha=0.5)\n plt.title('The Elbow Criterion')\n plt.xlabel('number of cluster')\n plt.ylabel('Sum of squared distances of samples to their closest cluster center')\n plt.show()", "def num_links(self):\n count=0.0\n for cluster in self.clusters:\n if self.clusters[cluster] == self.clusters[cluster].antecessor:\n numberofmembers=self.clusters[cluster].number_of_members\n count+=numberofmembers\n return count", "def count(self):\n\n if self.cluster:\n return self.cluster.count()\n\n return super().count()", "def getNbClusters( model):\r\n\r\n\tlabels = model.labels_\r\n\tlabelValues = []\r\n\tfor label in labels:\r\n\t\tif label not in labelValues and label != -1: labelValues.append(label)\r\n\tnbClusters = len( labelValues)\r\n\treturn nbClusters", "def calc_Nw(cluster_labels):\n\n cluster_labels = np.array(cluster_labels)\n labels_set = set(cluster_labels)\n n_labels = len(labels_set)\n\n Nw = []\n for label in labels_set:\n n_examples = np.sum(np.where(cluster_labels == label, 1, 0))\n n_cluster_pairs = n_examples * (n_examples - 1) / 2 # Combinations\n Nw.append(n_cluster_pairs)\n\n return int(np.sum(Nw))", "def generate_clusters_n(df, tweet_deleted):\n # cluster_labels, n_clusters = dbscan(normalised_df, true_labels, 0.25, 30)\n # print(\"normalised_df.head()\", normalised_df.head())\n clusterer = hdbscan.HDBSCAN(min_cluster_size=10)\n clusterer.fit(df)\n labels = clusterer.labels_\n cluster_groups = {}\n for i in labels:\n if cluster_groups.get(i):\n cluster_groups[i] = cluster_groups[i] + 1\n else:\n cluster_groups[i] = 1\n print(\"cluster_groups\", cluster_groups)\n df[\"cluster\"] = labels\n df[\"tweet_deleted\"] = tweet_deleted\n cluster_results = list()\n for cluster_no in cluster_groups.keys():\n print(\"++++++++++\")\n print(\"cluster_no\", cluster_no)\n cluster_result = list()\n cluster_result.append(cluster_no)\n\n cluster = df.mask('cluster', cluster_no)\n print(cluster_no, \" :\")\n tweet_deleted = cluster.mask('tweet_deleted', True).shape[0]\n not_tweet_deleted = cluster.mask('tweet_deleted', False).shape[0]\n print(\"deleted_df len:\", tweet_deleted)\n print(\"not_deleted_df len:\", not_tweet_deleted)", "def 
calculate_cluster_size(result, var):\n \n cluster_results=pd.DataFrame(result[var].value_counts())\n ratio=np.round(cluster_results/cluster_results.sum()*100, 2).rename(columns={var:\"ratio\"})\n return cluster_results.join(ratio)", "def set_nb_clusters(self):\n \n print(\"Finding the optimal number of clusters...\")\n \n sample = ro.r.matrix(self.df[self.df[\"filename\"].between(1, 4)][\"active_power\"].to_numpy())\n \n r=ro.r(\"\"\"\n check = function(matrix) {\n n_clust = fviz_nbclust(matrix, kmeans, k.max = 15)\n\n n_clust = n_clust$data\n\n max_cluster = as.numeric(n_clust$clusters[which.max(n_clust$y)])\n return(max_cluster)\n }\n \"\"\")\n\n result = r(sample)\n self.conf[\"nb_clust\"] = int(result[0])\n \n print(f\"Optimal number of clusters is {self.conf['nb_clust']}\\n\")", "def test_count_reads_per_cluster(self):\n \n bedtool = pybedtools.BedTool(clipper.test_file(\"clip_analysis_test_peak_results.bed\"))\n \n total_reads, reads_per_cluster = count_reads_per_cluster(bedtool, None)\n \n self.assertListEqual([147,52, 239, 85, 47, 119, 58, 588, 92, 59, 196, 36], reads_per_cluster)\n self.assertEqual(sum([147,52, 239, 85, 47, 119, 58, 588, 92, 59, 196, 36]), total_reads)", "def analysis_function_total_elements(self,clustering):\n return clustering.total_number_of_elements", "def count_all_cluster_instances(cluster_name, predictive=False, exclude_node_label_keys=app_config[\"EXCLUDE_NODE_LABEL_KEYS\"]):\n\n # Get the K8s nodes on the cluster, while excluding nodes with certain label keys\n k8s_nodes = get_k8s_nodes(exclude_node_label_keys)\n\n count = 0\n asgs = get_all_asgs(cluster_name)\n for asg in asgs:\n instances = asg['Instances']\n if predictive:\n count += asg['DesiredCapacity']\n else:\n # Use the get_node_by_instance_id() function as it only returns the node if it is not excluded by K8s labels\n for instance in instances:\n instance_id = instance['InstanceId']\n try:\n get_node_by_instance_id(k8s_nodes, instance_id)\n count += 1\n except Exception:\n logger.info(\"Skipping instance {}\".format(instance_id))\n logger.info(\"{} asg instance count in cluster is: {}. 
K8s node count should match this number\".format(\"*** Predicted\" if predictive else \"Current\", count))\n return count", "def get_count_all(cls, context, cluster_id):\n return cls.dbapi.get_cluster_nodegroup_count(context, cluster_id)", "def data_center_count(self) -> int:\n return pulumi.get(self, \"data_center_count\")", "def test_clusters(trained_data, centroids):\n\n for c in range(len(centroids)):\n count_1 = 0\n count_0 = 0\n for p in range(len(trained_data)):\n if trained_data[p][-2] == 0 and trained_data[p][-1] == centroids[c]:\n count_0 += 1\n if trained_data[p][-2] == 1 and trained_data[p][-1] == centroids[c]:\n count_1 += 1\n print (\"Centroid \", c+1, \":\", centroids[c])\n print(\"Number of 1's: \", count_1)\n print(\"Number of 0's: \", count_0)\n print(\"Percent 1's: \", round((count_1/(count_1 + count_0))*100,2))\n print(\"Percent 0's: \", round((count_0 / (count_1 + count_0)) * 100,2))\n print(\"****************\")", "def dissimilarity(clusters):\n totDist = 0\n for c in clusters:\n totDist += c.variability()\n return totDist", "def cluster_cal(self):\n self.Cluster = []\n for i in range(self.nodenum):\n neighborhood_node = self.neighbor_node(i)\n Node_num = len(neighborhood_node)\n Count = self.neighbor_edge(neighborhood_node)\n if(Node_num == 0 or Node_num == 1):\n self.Cluster.append(0.5)\n else:\n self.Cluster.append(Count/(Node_num*(Node_num - 1)))\n \n self.cluster_coeff = np.average(self.Cluster)", "def cluster_mean_shift(vectors):\n ms = MeanShift()\n ms.fit(vectors)\n\n labels = ms.labels_\n labels_unique = np.unique(labels)\n n_clusters = len(labels_unique)\n\n print(\"Discovered {} clusters\".format(n_clusters))\n print(labels)\n\n return labels, n_clusters", "def getCounts(training_data, test_row, k):\n neighbors = getNeighbors(training_data, test_row, k)\n output_vals = [row[-1] for row in neighbors]\n\n counts = dict()\n\n for i in output_vals:\n counts[i] = counts.get(i, 0) + 1\n \n return counts", "def get_cluster_indices(self,dataset, cluster_number):\n\t\tself.__init__(dataset, self.k)\n\t\tself.e_step() #got responsibilities\n\t\tmax_cluster = np.argmax(self.w, axis = 1)\n\t\tindices = []\n\t\tfor i in range(dataset.shape[0]):\n\t\t\tif max_cluster[i] == cluster_number:\n\t\t\t\tindices.append(i)\n\t\treturn indices", "def cluster(dfi, samples, num_clusters=8, random_state=1):\n df = dfi.fillna(0)\n X = df[samples].values\n kmeans = KMeans(n_clusters=num_clusters,\n random_state=random_state).fit(X)\n cluster_number = kmeans.labels_\n df['kmeans_cluster_number'] = cluster_number\n return df", "def get_cluster_idx(_cluster):\n\n return _cluster.cluster_idx" ]
[ "0.7697342", "0.7521841", "0.74846464", "0.745221", "0.73620665", "0.7293663", "0.7275436", "0.7197757", "0.7003787", "0.6995928", "0.6992263", "0.69813204", "0.68972546", "0.67149794", "0.6631092", "0.66106117", "0.6603212", "0.65768135", "0.65458935", "0.65229785", "0.6497056", "0.6440297", "0.64380723", "0.64043087", "0.6332654", "0.62918615", "0.6266118", "0.6259073", "0.62547225", "0.6248232" ]
0.8255985
0
Provides the means of the cluster features for each cluster. If evaluate_by is set, then clusters will be sorted by the mean value of the "evaluate_by" column
def cluster_means(self):
    if self.evaluate_by is not None:
        return(self.merged_data.groupby(
            'labels').mean().sort_values(self.evaluate_by).transpose())
    else:
        return(self.merged_data.groupby('labels').mean().transpose())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cluster_means_scaled(self):\n if self.evaluate_by is not None:\n return(self.merged_scaled_data.groupby(\n 'labels').mean().sort_values(self.evaluate_by).transpose())\n else:\n return(self.merged_scaled_data.groupby(\n 'labels').mean().transpose())", "def _compute_cluster_averages(self, key=\"_scvi_labels\"):\n # find cell label column\n label_col = self.adata.uns[\"_scvi\"][\"categorical_mappings\"][key][\"original_key\"]\n\n # find data slot\n x_dict = self.adata.uns[\"_scvi\"][\"data_registry\"][\"X\"]\n if x_dict[\"attr_name\"] == \"X\":\n use_raw = False\n else:\n use_raw = True\n if x_dict[\"attr_name\"] == \"layers\":\n layer = x_dict[\"attr_key\"]\n else:\n layer = None\n\n # compute mean expression of each gene in each cluster/batch\n aver = compute_cluster_averages(self.adata, labels=label_col, use_raw=use_raw, layer=layer)\n\n return aver", "def compute_means(runtimes):\n# tmp = runtimes[kernel_name]\n tmp_ = [ (int(key), float(np.mean(val)))\n for key, val in runtimes.iteritems()\n ]\n return sort_fst(tmp_)", "def grouping(data_clust):\n data_grouped = data_clust.groupby('Clusters').mean()\n return data_grouped", "def wca_mean(X, k, df):\n\t\n\n\t# Intializing the clusters\t\n\tC = dict()\n\tfor cluster in range(k):\n\t C[cluster] = pd.DataFrame()\n\n\t# Calculating the mean vector\n\tmean_vector = X.mean()\n\n\t# Choosing the seed points based on the minimum distance from the mean vector\n\tX['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mean_vector)), axis=1)\n\tdist_means = X.sort_values(by='dist_mean')\n\t\n\t# Dropping the the datapoints which have already been assigned as seed\n\tidx_to_drop = dist_means.index[:k]\n\tdist_means.reset_index(drop=True,inplace=True)\n\tX.drop('dist_mean',axis=1,inplace=True)\n\tX.drop(idx_to_drop, inplace=True)\n\n\t# Assigning seed points to the clusters\n\tmu = list()\n\tfor cluster in range(k):\n\t C[cluster] = C[cluster].append(dist_means.iloc[cluster].drop('dist_mean'))\n\t mu.append(C[cluster].mean())\n\t\n\t# Running the algorithm\t\n\t\n\t# Initializing the p-value list which would be used for plotting\n\tpval = dict()\n\n\tfor cluster in range(k):\n\t pval[cluster] = dict()\n\t for i in C[0].columns:\n\t pval[cluster][i] = list()\n\n\t# Algorithm\n\tfor i in tqdm(range(int(len(X)/k)), desc='Iterations: '):\n\t for cluster in range(k):\n\n\t # Calculating the distances from the mean vector of eaimportch cluster (in Descending order)\n\t X['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mu[cluster])), axis=1)\n\t dist_means = X.sort_values(by='dist_mean', ascending=False)\n\t idx_to_drop = dist_means.index[0]\n\t dist_means.reset_index(drop=True,inplace=True)\n\t X.drop('dist_mean',axis=1,inplace=True)\n\n\t # Assigning the top value to the cluster\n\t C[cluster] = C[cluster].append(dist_means.iloc[0].drop('dist_mean'))\n\t C[cluster] = C[cluster].reset_index(drop=True)\n\t \n\t # Updating means of each cluster\n\t mu[cluster] = C[cluster].mean()\n\n\t # Remove datapoint from X?\n\t X.drop(idx_to_drop,inplace=True)\n\t \n\t for i in C[0].columns:\n\t pval[cluster][i].append(sc.ks_2samp(C[cluster][i],df.drop('target',axis=1)[i])[1])\n\n\treturn(C,pval)", "def clusterting_feature_importance (df, cluster_col):\r\n scores = pd.DataFrame()\r\n df0 = df.copy()\r\n df0 = df.select_dtypes(include=np.number)\r\n\r\n for i in df0[cluster_col].unique():\r\n df2 = df0[df0[cluster_col] == i]\r\n df2.drop(cluster_col,axis=1, inplace=True)\r\n #df2 = df.select_dtypes(include=np.number)\r\n 
scores[i] = df2.std() / (df2.max() - df2.min())\r\n scores['mean'] = scores.mean(axis = 1)\r\n\r\n scores = 1 - scores\r\n\r\n return scores", "def byMeans(dataset, number_of_clusters, class_header=\"Class\", verbosity=0, return_clusters=False):\n if verbosity >= 2: # optionally print dataset shape and info\n print(dataset.shape)\n print(dataset)\n\n old_dataset = dataset.copy()\n dataset = dataset.drop(columns=class_header) # remove non-float class column\n\n # Assign centroids to random values which fit into dataset space.\n centroids = pandas.DataFrame(columns=dataset.columns,\n data=numpy.random.uniform(dataset.min(), dataset.max(),\n (number_of_clusters, dataset.shape[1])))\n if verbosity >= 1: # optionally print centroids and random dataset\n print(\"INITIAL CENTROIDS\")\n print(centroids)\n if verbosity >= 2:\n print(\"DATAFRAME DATASET\")\n print(dataset)\n\n for iterations in range(MAX_ITERATIONS): # Loop until MAX_ITERATIONS or settled\n if verbosity >= 1: # optionally print iteration count\n print(\"ITERATIONS\")\n print(iterations)\n\n # calculate clustering of data\n clusters = Cluster.calcClusters(dataset, centroids, number_of_clusters, verbosity=verbosity)\n\n old_centroids = centroids.copy() # copy centroid dataframe\n\n if verbosity >= 2: # optionally print cluster list\n print(\"DATAFRAME ARRAY CLUSTERS\")\n print(clusters)\n\n for cluster_index, cluster in enumerate(clusters): # Calculate new centroids\n cluster_mean = cluster.mean()\n if not cluster_mean.isnull().any(): # make sure we dont write null means to centroid list\n centroids.loc[cluster_index] = cluster_mean\n\n if verbosity >= 1:\n print(\"OLD CENTROIDS\")\n print(old_centroids)\n print(\"NEW CENTROIDS\")\n print(centroids)\n\n if old_centroids is not None: # Calculate sum of centroid movements.\n centroid_change = 0\n for centroid_index, centroid in centroids.iterrows():\n centroid_change += abs(Cluster.calcDistance(centroid, old_centroids.loc[centroid_index]))\n\n if verbosity >= 1:\n print(\"CENTROID DIFF\")\n print(centroid_change)\n\n if centroid_change < SETTLE_THRESHOLD: # break if centroid movement is below threshold.\n break\n\n # Final Cluster re-calculation\n clusters = Cluster.calcClusters(old_dataset, centroids, number_of_clusters,\n verbosity=verbosity, class_header=class_header)\n # Create new dataframe with class column of and row for each centroid\n centroids_class = pandas.DataFrame(data=[\"NOCLASS\"] * centroids.shape[0], columns=[class_header])\n if verbosity >= 2:\n print(centroids_class)\n print(centroids)\n for cluster_index, cluster in enumerate(clusters): # For each cluster\n if verbosity >= 2:\n print(cluster_index)\n print(cluster)\n if cluster.size > 0: # If cluster is not empty set centroid class to most common class in cluster\n centroids_class.iat[cluster_index, 0] = cluster.mode().loc[0][0]\n if old_dataset.columns[0] == class_header: # check if class column should be first or last.\n print(\"CLASS IS FIRST COL\")\n centroids = pandas.concat([centroids_class, centroids], axis=1) # merge class to centroids as first column\n else:\n print(\"CLASS IS NOT FIRST COL\")\n centroids = pandas.concat([centroids, centroids_class], axis=1) # merge class to centroids as last column\n for centroid in centroids.iterrows(): # For each centroid\n if centroid[1][class_header] is \"NOCLASS\": # Trim NOCLASS centroids (empty cluster)\n centroids = centroids.drop(centroid[0])\n centroids = centroids.reset_index(drop=True) # Reindex centroids\n\n if return_clusters is True: # optionally return 
cluster list\n return centroids, clusters\n pass\n else:\n return centroids # return centroids dataframe", "def _eval_clustering(self, gen_reviews, clusters, embedding_model, clustering):\n result = []\n preds = self.predict_gen(gen_reviews, embedding_model, clustering)\n\n acc = accuracy_score(np.array(clusters), np.array(preds))\n conf = confusion_matrix(np.array(clusters), np.array(preds))\n\n return acc, conf", "def compute_cluster_ensemble(var, indicesOnCluster, maxIndices, indicesToParticle): #{{{\n\n num_clusters = maxIndices.shape[0]\n if len(var.shape) == 1:\n meanvar = np.zeros((num_clusters,))\n elif len(var.shape) == 2:\n meanvar = np.zeros((var.shape[0],num_clusters))\n else:\n warnings.warn('did not have correct shape for ' + str(var) + ' with len(var.shape)='+ str(len(var.shape)))\n meanvar = None\n\n for aCluster, maxInd in enumerate(maxIndices):\n # get particles in cluster\n particles = indicesToParticle[indicesOnCluster[aCluster,0:maxInd]]\n\n # compute mean depending upon size of array\n if len(var.shape) == 1:\n meanvar[aCluster] = np.mean(var[particles])\n if len(var.shape) == 2:\n meanvar[:,aCluster] = np.mean(var[:,particles], axis=1)\n\n return meanvar #}}}", "def get_profit_per_cluster(df: pd.core.frame.DataFrame) -> pd.core.frame.DataFrame:\n return pd.DataFrame(df.groupby(by='cluster')['profit'].mean(), columns=['profit']).reset_index()", "def evaluate(self):\n results = dict()\n for metric in self.metrics:\n print('Evaluating clustering with metric %s' % metric)\n if metric in LABEL_METRICS.keys():\n results[metric] = LABEL_METRICS[metric](self.X, self.model.labels_)\n results['adjusted_rand_score'] = SCORE_METRICS['adjusted_rand_score'](self.Y[:, 0], self.model.labels_)\n self.results = results\n return results", "def compute_means(opts, train_data, sampler):\n exp_names = train_data[\"exp_names\"].value\n means = []\n stds = []\n if opts[\"flags\"].normalize is True:\n running_stats = []\n # a running stat for each channel\n running_stats = RunningStats(3)\n # loop over the experiments\n\n # for exp_name in exp_names:\n for j in range(0, len(exp_names), 2):\n batch = sampler.get_minibatch()\n exp_name = batch[2][0]\n print(exp_name)\n # loop over the keys\n\n seq_len = train_data[\"exps\"][exp_name][\"labels\"].shape[0]\n temp_feat = batch[0].cpu().numpy()\n temp_feat = temp_feat[:seq_len, :, :, :]\n\n channel_feats = []\n for i in range(3):\n # channel_feat = temp_feat[0, :, i, :]\n # sample frames\n channel_feat = temp_feat[::100, i, :]\n channel_feat = channel_feat.reshape(-1, 1)\n channel_feats.append(channel_feat)\n\n channel_feats = np.concatenate(channel_feats, axis=1)\n running_stats.add_data(\n channel_feat\n )\n\n means = running_stats.mean.tolist()\n stds = running_stats.compute_std().tolist()\n else:\n means = [.5, .5, .5]\n stds = [1, 1, 1]\n # for key in opts[\"flags\"].feat_keys:\n # temp_feat = train_data[\"exps\"][exp_names[0]][key].value\n # mean = np.zeros((temp_feat.shape[2], ))\n # std = np.ones((temp_feat.shape[2], ))\n # means.append(mean)\n # stds.append(std)\n normalize = transforms.Normalize(mean=means,\n std=stds)\n\n return normalize", "def __kmeans__(cls, cluster_size, pca_reduced, names: list):\n import warnings\n warnings.filterwarnings(\"ignore\")\n clusterer = KMeans(n_clusters=cluster_size, random_state=10)\n cluster_labels = clusterer.fit_predict(pca_reduced)\n result = list()\n result.append(ClusterProcessor.davies_bouldin(cluster_labels, pca_reduced, cluster_size, names))\n 
result.append(ClusterProcessor.variance_ratio_criterion(cluster_labels, pca_reduced, cluster_size, names))\n result.append(ClusterProcessor.silhouette_coefficient(cluster_labels, pca_reduced, cluster_size, names))\n return result", "def evaluation_k_means(X_selected, n_clusters, y, n_jobs = 1):\n k_means = KMeans(n_clusters=n_clusters, init='k-means++', n_init=10, max_iter=300,\n tol=0.0001, precompute_distances=True, verbose=0,\n random_state=None, copy_x=True, n_jobs=n_jobs)\n \n k_means.fit(X_selected)\n y_predict = k_means.labels_\n \n # calculate NMI\n nmi = normalized_mutual_info_score(y, y_predict)\n \n # calculate ACC\n y_permuted_predict = best_map(y, y_predict)\n acc = accuracy_score(y, y_permuted_predict)\n \n return nmi, acc", "def _compute_centroids(self):\n\n for i in range(0, self.k):\n cluster = np.argwhere(self.assigned_clusters == i)\n cluster_points = self.data[cluster].squeeze()\n self.centroids[i] = np.mean(cluster_points, axis=0)", "def get_means(self):\n if self.cv_method == 'fixed':\n perf = np.mean(self.evaluations, axis=0)\n perf = np.nanmean(perf, axis=-1)\n elif self.cv_method == 'crossvalidation':\n perf = np.mean(self.evaluations, axis=0)\n perf = np.nanmean(perf, axis=-1)\n else:\n perf = self.evaluations\n while len(perf.shape) > 2:\n perf = np.nanmean(perf, axis=-1)\n perf = perf[~np.isnan(perf[:, 0])]\n perf = np.mean(perf, axis=0)\n return perf", "def get_means(self):\n if self.metadata is None:\n self.get_metadata()\n\n # we want only the numerical features\n df = self.metadata.select_dtypes(include=['int64', 'float64'])\n return df.mean()", "def compute_means(self):\n ###TODO\n vector_means = []\n for doc in self.fin_clust.values():\n vec = defaultdict(float)\n for d_id in doc:\n doc_keys = self.docs[d_id].keys()\n for key in self.docs[d_id]:\n vec[key] = vec[key] + self.docs[d_id][key]\n tot = len(doc)\n x = defaultdict(float)\n for k,v in vec.items():\n x[k] = float(v)/tot\n vec = Counter(x)\n vector_means.append(vec)\n return vector_means", "def cluster(self,method=\"kmeans\",properties=None,k=3):\n try :\n from sklearn.cluster import KMeans, Ward\n from sklearn import __version__\n except :\n logger.warning(\"install scikits-learning package\")\n return\n X = [] #List of feature vector of each blob\n if not properties:\n properties = ['color','shape','position']\n if k > len(self):\n logger.warning(\"Number of clusters cannot be greater then the number of blobs in the featureset\")\n return\n for i in self:\n featureVector = []\n if 'color' in properties:\n featureVector.extend(i.mAvgColor)\n if 'shape' in properties:\n featureVector.extend(i.mHu)\n if 'position' in properties:\n featureVector.extend(i.extents())\n if not featureVector :\n logger.warning(\"properties parameter is not specified properly\")\n return\n X.append(featureVector)\n\n if method == \"kmeans\":\n \n # Ignore minor version numbers.\n sklearn_version = re.search(r'\\d+\\.\\d+', __version__).group()\n \n if (float(sklearn_version) > 0.11):\n k_means = KMeans(init='random', n_clusters=k, n_init=10).fit(X)\n else:\n k_means = KMeans(init='random', k=k, n_init=10).fit(X)\n KClusters = [ FeatureSet([]) for i in range(k)]\n for i in range(len(self)):\n KClusters[k_means.labels_[i]].append(self[i])\n return KClusters\n\n if method == \"hierarchical\":\n ward = Ward(n_clusters=int(sqrt(len(self)))).fit(X) #n_clusters = sqrt(n)\n WClusters = [ FeatureSet([]) for i in range(int(sqrt(len(self))))]\n for i in range(len(self)):\n WClusters[ward.labels_[i]].append(self[i])\n return 
WClusters", "def get_mean_profit_per_class_from_train_df(df_profit_per_cluster_train: pd.core.frame.DataFrame) -> tuple:\n # if condition returns False, AssertionError is raised:\n assert len(df_profit_per_cluster_train) >= 3, \"Algorithm, returned less than 3 clusters.\"\n\n df_profit_per_cluster = df_profit_per_cluster_train.sort_values(by='profit', ascending=False)\n group_size = int(len(df_profit_per_cluster) / 3)\n\n buy_clusters_mean_profit = df_profit_per_cluster.iloc[:group_size]['profit'].mean()\n sell_clusters_mean_profit = df_profit_per_cluster.iloc[-group_size:]['profit'].mean()\n\n buy_clusters_list = list(df_profit_per_cluster.iloc[:group_size]['cluster'])\n sell_clusters_list = list(df_profit_per_cluster.iloc[-group_size:]['cluster'])\n\n return buy_clusters_mean_profit, buy_clusters_list, sell_clusters_mean_profit, sell_clusters_list", "def ensemble_mean(self):\n new_cubelist = []\n for cube in self.cubelist:\n new_cubelist.append(self.cube_ensemble_mean(cube))\n self.cubelist = iris.cube.CubeList(new_cubelist)\n self.processes.append('ensemble_mean')\n return self.cubelist", "def media(self):\n self.kmeans = [[] for i in range(0,self.cluster_number)]\n for i in range(self.cluster_number):\n for j in range(0,len(self.cluster[i][0])):\n self.kmeans[i].append(np.sum(self.cluster[i][::,j:j+1:])/len(self.cluster[i][::,j:j+1:]))", "def kmeans_cluster(X_train_input, n_clusters=100):\r\n from sklearn.cluster import MiniBatchKMeans\r\n image_descriptors = []\r\n [image_descriptors.extend(ORB_feature_extractor(img)) for img in X_train_input]\r\n image_descriptors = np.array(image_descriptors) \r\n \r\n kmeans_model = MiniBatchKMeans(n_clusters=n_clusters, init_size=5*n_clusters,\r\n random_state=34, batch_size=128).fit(image_descriptors)\r\n \r\n return kmeans_model", "def mean_cluster(self, labelled_cluster):\n sum_of_points = self.sum_cluster(labelled_cluster)\n size_cluster = len(labelled_cluster)\n if self.sigma_cl1:\n size_cluster += np.sqrt(2)*self.sigma_cl1*np.random.randn()\n mean_of_points = sum_of_points * (1.0 / size_cluster)\n return mean_of_points", "def evaluation(X_selected, X_test, n_clusters, y):\n k_means = KMeans(n_clusters=n_clusters, init='k-means++', n_init=10, max_iter=300,\n tol=0.0001, precompute_distances=True, verbose=0,\n random_state=None, copy_x=True, n_jobs=1)\n\n k_means.fit(X_selected)\n y_predict = k_means.predict(X_test)\n\n # calculate NMI\n nmi = normalized_mutual_info_score(y, y_predict, average_method='arithmetic')\n\n sil = silhouette_score(X_test, y_predict, metric=\"euclidean\")\n db_score = davies_bouldin_score(X_test, y_predict)\n ch_score = calinski_harabasz_score(X_test, y_predict)\n purity = calcolaPurity(y, y_predict)\n\n return nmi, sil, db_score, ch_score, purity", "def cluster_by_partitioning(active_sites):\n cls, sc = k_means(active_sites)\n\n return cls", "def fit(self):\n self.cluseter_agglomerative(n_clusters=20, linkage='average', iterate=5)\n self.sub_clustering(n_clusters=3, index_cluster=[79], linkage='complete')\n self.merge_clusters([[0,9,53],[1,83],[46,35,67],[88,23],[6,68]])\n self.merge_clusters([[6,33,52],[17,14]])\n self.sub_clustering(n_clusters=2, index_cluster=[0], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[2], linkage='average')\n self.sub_clustering(n_clusters=3, index_cluster=[85], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[14], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[16], linkage='average')\n self.sub_clustering(n_clusters=3, 
index_cluster=[22], linkage='average')\n self.sub_clustering(n_clusters=2, index_cluster=[24], linkage='complete')\n self.sub_clustering(n_clusters=2, index_cluster=[26], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[28], linkage='ward')\n self.merge_clusters([[6,98,99]])\n self.merge_clusters([[35,80]])\n self.sub_clustering(n_clusters=4, index_cluster=[35], linkage='complete')\n self.merge_clusters([[76,98]])\n self.sub_clustering(n_clusters=3, index_cluster=[35], linkage='complete')\n self.merge_clusters([[39,42]])\n self.sub_clustering(n_clusters=3, index_cluster=[47], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='average')\n self.merge_clusters([[70,101]])\n self.sub_clustering(n_clusters=3, index_cluster=[51], linkage='complete')\n self.sub_clustering(n_clusters=3, index_cluster=[61], linkage='ward')\n self.merge_clusters()\n return", "def calculate_kmeans(df, clusters=10):\r\n kmeans = KMeans(n_clusters=clusters)\r\n labels = kmeans.fit_predict(df)\r\n\r\n return kmeans, labels", "def get_clusters_with_all_features(df, n_clusters):\n pipe = _build_model(df, use_pca=False, n_components=0, use_kmeans=True, n_clusters=n_clusters)\n labels = pipe.named_steps['kmeans'].labels_\n df.loc[:, 'labels'] = labels\n print(df.groupby('labels').agg(\n {'Fresh': 'mean', 'Milk': 'mean', 'Grocery': 'mean', 'Frozen': 'mean', 'Detergents_Paper': 'mean',\n 'Delicassen': 'mean'}))\n print(pipe.named_steps['scaler'].inverse_transform(pipe.named_steps['kmeans'].cluster_centers_))\n # cluster 1: low spending behaviour in general\n # cluster 2: high spending in detergents_paper, milk, grocery\n # cluster 3: high spending in fresh, rest low\n # cluster 4: high spending in everything except detergents_paper, extremely high in delicassen\n # cluster 5: medium spending in general, low in frozen, high in detergents and paper", "def compute_clusters(addons_df, num_clusters, random_seed):\n\n # Build the stages of the pipeline. We need hashing to make the next\n # steps work.\n hashing_stage = HashingTF(inputCol=\"addon_ids\", outputCol=\"hashed_features\")\n idf_stage = IDF(inputCol=\"hashed_features\", outputCol=\"features\", minDocFreq=1)\n # As a future improvement, we may add a sane value for the minimum cluster size\n # to BisectingKMeans (e.g. minDivisibleClusterSize). For now, just make sure\n # to pass along the random seed if needed for tests.\n kmeans_kwargs = {\"seed\": random_seed} if random_seed else {}\n bkmeans_stage = BisectingKMeans(k=num_clusters, **kmeans_kwargs)\n pipeline = Pipeline(stages=[hashing_stage, idf_stage, bkmeans_stage])\n\n # Run the pipeline and compute the results.\n model = pipeline.fit(addons_df)\n return model.transform(addons_df).select([\"client_id\", \"prediction\"])" ]
[ "0.74477243", "0.64676", "0.630643", "0.61944944", "0.6128539", "0.6070738", "0.604014", "0.58796227", "0.58664966", "0.5865818", "0.584644", "0.5843564", "0.57876366", "0.5773927", "0.5773633", "0.5760417", "0.57511604", "0.57441485", "0.57324463", "0.5730679", "0.5674215", "0.56381613", "0.56201094", "0.56066835", "0.5583054", "0.55741805", "0.5574175", "0.55726594", "0.5557826", "0.55443263" ]
0.8206238
0
Provides the means (scaled) of the cluster features for each cluster. If evaluate_by is set, then clusters will be sorted by the mean value of the "evaluate_by" column.
def cluster_means_scaled(self): if self.evaluate_by is not None: return(self.merged_scaled_data.groupby( 'labels').mean().sort_values(self.evaluate_by).transpose()) else: return(self.merged_scaled_data.groupby( 'labels').mean().transpose())
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def cluster_means(self):\n if self.evaluate_by is not None:\n return(self.merged_data.groupby(\n 'labels').mean().sort_values(self.evaluate_by).transpose())\n else:\n return(self.merged_data.groupby('labels').mean().transpose())", "def _compute_cluster_averages(self, key=\"_scvi_labels\"):\n # find cell label column\n label_col = self.adata.uns[\"_scvi\"][\"categorical_mappings\"][key][\"original_key\"]\n\n # find data slot\n x_dict = self.adata.uns[\"_scvi\"][\"data_registry\"][\"X\"]\n if x_dict[\"attr_name\"] == \"X\":\n use_raw = False\n else:\n use_raw = True\n if x_dict[\"attr_name\"] == \"layers\":\n layer = x_dict[\"attr_key\"]\n else:\n layer = None\n\n # compute mean expression of each gene in each cluster/batch\n aver = compute_cluster_averages(self.adata, labels=label_col, use_raw=use_raw, layer=layer)\n\n return aver", "def compute_means(runtimes):\n# tmp = runtimes[kernel_name]\n tmp_ = [ (int(key), float(np.mean(val)))\n for key, val in runtimes.iteritems()\n ]\n return sort_fst(tmp_)", "def clusterting_feature_importance (df, cluster_col):\r\n scores = pd.DataFrame()\r\n df0 = df.copy()\r\n df0 = df.select_dtypes(include=np.number)\r\n\r\n for i in df0[cluster_col].unique():\r\n df2 = df0[df0[cluster_col] == i]\r\n df2.drop(cluster_col,axis=1, inplace=True)\r\n #df2 = df.select_dtypes(include=np.number)\r\n scores[i] = df2.std() / (df2.max() - df2.min())\r\n scores['mean'] = scores.mean(axis = 1)\r\n\r\n scores = 1 - scores\r\n\r\n return scores", "def __kmeans__(cls, cluster_size, pca_reduced, names: list):\n import warnings\n warnings.filterwarnings(\"ignore\")\n clusterer = KMeans(n_clusters=cluster_size, random_state=10)\n cluster_labels = clusterer.fit_predict(pca_reduced)\n result = list()\n result.append(ClusterProcessor.davies_bouldin(cluster_labels, pca_reduced, cluster_size, names))\n result.append(ClusterProcessor.variance_ratio_criterion(cluster_labels, pca_reduced, cluster_size, names))\n result.append(ClusterProcessor.silhouette_coefficient(cluster_labels, pca_reduced, cluster_size, names))\n return result", "def wca_mean(X, k, df):\n\t\n\n\t# Intializing the clusters\t\n\tC = dict()\n\tfor cluster in range(k):\n\t C[cluster] = pd.DataFrame()\n\n\t# Calculating the mean vector\n\tmean_vector = X.mean()\n\n\t# Choosing the seed points based on the minimum distance from the mean vector\n\tX['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mean_vector)), axis=1)\n\tdist_means = X.sort_values(by='dist_mean')\n\t\n\t# Dropping the the datapoints which have already been assigned as seed\n\tidx_to_drop = dist_means.index[:k]\n\tdist_means.reset_index(drop=True,inplace=True)\n\tX.drop('dist_mean',axis=1,inplace=True)\n\tX.drop(idx_to_drop, inplace=True)\n\n\t# Assigning seed points to the clusters\n\tmu = list()\n\tfor cluster in range(k):\n\t C[cluster] = C[cluster].append(dist_means.iloc[cluster].drop('dist_mean'))\n\t mu.append(C[cluster].mean())\n\t\n\t# Running the algorithm\t\n\t\n\t# Initializing the p-value list which would be used for plotting\n\tpval = dict()\n\n\tfor cluster in range(k):\n\t pval[cluster] = dict()\n\t for i in C[0].columns:\n\t pval[cluster][i] = list()\n\n\t# Algorithm\n\tfor i in tqdm(range(int(len(X)/k)), desc='Iterations: '):\n\t for cluster in range(k):\n\n\t # Calculating the distances from the mean vector of eaimportch cluster (in Descending order)\n\t X['dist_mean'] = X.apply(lambda x: np.linalg.norm(np.asarray(x)- np.asarray(mu[cluster])), axis=1)\n\t dist_means = X.sort_values(by='dist_mean', 
ascending=False)\n\t idx_to_drop = dist_means.index[0]\n\t dist_means.reset_index(drop=True,inplace=True)\n\t X.drop('dist_mean',axis=1,inplace=True)\n\n\t # Assigning the top value to the cluster\n\t C[cluster] = C[cluster].append(dist_means.iloc[0].drop('dist_mean'))\n\t C[cluster] = C[cluster].reset_index(drop=True)\n\t \n\t # Updating means of each cluster\n\t mu[cluster] = C[cluster].mean()\n\n\t # Remove datapoint from X?\n\t X.drop(idx_to_drop,inplace=True)\n\t \n\t for i in C[0].columns:\n\t pval[cluster][i].append(sc.ks_2samp(C[cluster][i],df.drop('target',axis=1)[i])[1])\n\n\treturn(C,pval)", "def evaluate(self):\n results = dict()\n for metric in self.metrics:\n print('Evaluating clustering with metric %s' % metric)\n if metric in LABEL_METRICS.keys():\n results[metric] = LABEL_METRICS[metric](self.X, self.model.labels_)\n results['adjusted_rand_score'] = SCORE_METRICS['adjusted_rand_score'](self.Y[:, 0], self.model.labels_)\n self.results = results\n return results", "def grouping(data_clust):\n data_grouped = data_clust.groupby('Clusters').mean()\n return data_grouped", "def byMeans(dataset, number_of_clusters, class_header=\"Class\", verbosity=0, return_clusters=False):\n if verbosity >= 2: # optionally print dataset shape and info\n print(dataset.shape)\n print(dataset)\n\n old_dataset = dataset.copy()\n dataset = dataset.drop(columns=class_header) # remove non-float class column\n\n # Assign centroids to random values which fit into dataset space.\n centroids = pandas.DataFrame(columns=dataset.columns,\n data=numpy.random.uniform(dataset.min(), dataset.max(),\n (number_of_clusters, dataset.shape[1])))\n if verbosity >= 1: # optionally print centroids and random dataset\n print(\"INITIAL CENTROIDS\")\n print(centroids)\n if verbosity >= 2:\n print(\"DATAFRAME DATASET\")\n print(dataset)\n\n for iterations in range(MAX_ITERATIONS): # Loop until MAX_ITERATIONS or settled\n if verbosity >= 1: # optionally print iteration count\n print(\"ITERATIONS\")\n print(iterations)\n\n # calculate clustering of data\n clusters = Cluster.calcClusters(dataset, centroids, number_of_clusters, verbosity=verbosity)\n\n old_centroids = centroids.copy() # copy centroid dataframe\n\n if verbosity >= 2: # optionally print cluster list\n print(\"DATAFRAME ARRAY CLUSTERS\")\n print(clusters)\n\n for cluster_index, cluster in enumerate(clusters): # Calculate new centroids\n cluster_mean = cluster.mean()\n if not cluster_mean.isnull().any(): # make sure we dont write null means to centroid list\n centroids.loc[cluster_index] = cluster_mean\n\n if verbosity >= 1:\n print(\"OLD CENTROIDS\")\n print(old_centroids)\n print(\"NEW CENTROIDS\")\n print(centroids)\n\n if old_centroids is not None: # Calculate sum of centroid movements.\n centroid_change = 0\n for centroid_index, centroid in centroids.iterrows():\n centroid_change += abs(Cluster.calcDistance(centroid, old_centroids.loc[centroid_index]))\n\n if verbosity >= 1:\n print(\"CENTROID DIFF\")\n print(centroid_change)\n\n if centroid_change < SETTLE_THRESHOLD: # break if centroid movement is below threshold.\n break\n\n # Final Cluster re-calculation\n clusters = Cluster.calcClusters(old_dataset, centroids, number_of_clusters,\n verbosity=verbosity, class_header=class_header)\n # Create new dataframe with class column of and row for each centroid\n centroids_class = pandas.DataFrame(data=[\"NOCLASS\"] * centroids.shape[0], columns=[class_header])\n if verbosity >= 2:\n print(centroids_class)\n print(centroids)\n for cluster_index, cluster in 
enumerate(clusters): # For each cluster\n if verbosity >= 2:\n print(cluster_index)\n print(cluster)\n if cluster.size > 0: # If cluster is not empty set centroid class to most common class in cluster\n centroids_class.iat[cluster_index, 0] = cluster.mode().loc[0][0]\n if old_dataset.columns[0] == class_header: # check if class column should be first or last.\n print(\"CLASS IS FIRST COL\")\n centroids = pandas.concat([centroids_class, centroids], axis=1) # merge class to centroids as first column\n else:\n print(\"CLASS IS NOT FIRST COL\")\n centroids = pandas.concat([centroids, centroids_class], axis=1) # merge class to centroids as last column\n for centroid in centroids.iterrows(): # For each centroid\n if centroid[1][class_header] is \"NOCLASS\": # Trim NOCLASS centroids (empty cluster)\n centroids = centroids.drop(centroid[0])\n centroids = centroids.reset_index(drop=True) # Reindex centroids\n\n if return_clusters is True: # optionally return cluster list\n return centroids, clusters\n pass\n else:\n return centroids # return centroids dataframe", "def compute_means(opts, train_data, sampler):\n exp_names = train_data[\"exp_names\"].value\n means = []\n stds = []\n if opts[\"flags\"].normalize is True:\n running_stats = []\n # a running stat for each channel\n running_stats = RunningStats(3)\n # loop over the experiments\n\n # for exp_name in exp_names:\n for j in range(0, len(exp_names), 2):\n batch = sampler.get_minibatch()\n exp_name = batch[2][0]\n print(exp_name)\n # loop over the keys\n\n seq_len = train_data[\"exps\"][exp_name][\"labels\"].shape[0]\n temp_feat = batch[0].cpu().numpy()\n temp_feat = temp_feat[:seq_len, :, :, :]\n\n channel_feats = []\n for i in range(3):\n # channel_feat = temp_feat[0, :, i, :]\n # sample frames\n channel_feat = temp_feat[::100, i, :]\n channel_feat = channel_feat.reshape(-1, 1)\n channel_feats.append(channel_feat)\n\n channel_feats = np.concatenate(channel_feats, axis=1)\n running_stats.add_data(\n channel_feat\n )\n\n means = running_stats.mean.tolist()\n stds = running_stats.compute_std().tolist()\n else:\n means = [.5, .5, .5]\n stds = [1, 1, 1]\n # for key in opts[\"flags\"].feat_keys:\n # temp_feat = train_data[\"exps\"][exp_names[0]][key].value\n # mean = np.zeros((temp_feat.shape[2], ))\n # std = np.ones((temp_feat.shape[2], ))\n # means.append(mean)\n # stds.append(std)\n normalize = transforms.Normalize(mean=means,\n std=stds)\n\n return normalize", "def media(self):\n self.kmeans = [[] for i in range(0,self.cluster_number)]\n for i in range(self.cluster_number):\n for j in range(0,len(self.cluster[i][0])):\n self.kmeans[i].append(np.sum(self.cluster[i][::,j:j+1:])/len(self.cluster[i][::,j:j+1:]))", "def cluster(self,method=\"kmeans\",properties=None,k=3):\n try :\n from sklearn.cluster import KMeans, Ward\n from sklearn import __version__\n except :\n logger.warning(\"install scikits-learning package\")\n return\n X = [] #List of feature vector of each blob\n if not properties:\n properties = ['color','shape','position']\n if k > len(self):\n logger.warning(\"Number of clusters cannot be greater then the number of blobs in the featureset\")\n return\n for i in self:\n featureVector = []\n if 'color' in properties:\n featureVector.extend(i.mAvgColor)\n if 'shape' in properties:\n featureVector.extend(i.mHu)\n if 'position' in properties:\n featureVector.extend(i.extents())\n if not featureVector :\n logger.warning(\"properties parameter is not specified properly\")\n return\n X.append(featureVector)\n\n if method == \"kmeans\":\n \n 
# Ignore minor version numbers.\n sklearn_version = re.search(r'\\d+\\.\\d+', __version__).group()\n \n if (float(sklearn_version) > 0.11):\n k_means = KMeans(init='random', n_clusters=k, n_init=10).fit(X)\n else:\n k_means = KMeans(init='random', k=k, n_init=10).fit(X)\n KClusters = [ FeatureSet([]) for i in range(k)]\n for i in range(len(self)):\n KClusters[k_means.labels_[i]].append(self[i])\n return KClusters\n\n if method == \"hierarchical\":\n ward = Ward(n_clusters=int(sqrt(len(self)))).fit(X) #n_clusters = sqrt(n)\n WClusters = [ FeatureSet([]) for i in range(int(sqrt(len(self))))]\n for i in range(len(self)):\n WClusters[ward.labels_[i]].append(self[i])\n return WClusters", "def get_profit_per_cluster(df: pd.core.frame.DataFrame) -> pd.core.frame.DataFrame:\n return pd.DataFrame(df.groupby(by='cluster')['profit'].mean(), columns=['profit']).reset_index()", "def kmeans_cluster(X_train_input, n_clusters=100):\r\n from sklearn.cluster import MiniBatchKMeans\r\n image_descriptors = []\r\n [image_descriptors.extend(ORB_feature_extractor(img)) for img in X_train_input]\r\n image_descriptors = np.array(image_descriptors) \r\n \r\n kmeans_model = MiniBatchKMeans(n_clusters=n_clusters, init_size=5*n_clusters,\r\n random_state=34, batch_size=128).fit(image_descriptors)\r\n \r\n return kmeans_model", "def compute_cluster_ensemble(var, indicesOnCluster, maxIndices, indicesToParticle): #{{{\n\n num_clusters = maxIndices.shape[0]\n if len(var.shape) == 1:\n meanvar = np.zeros((num_clusters,))\n elif len(var.shape) == 2:\n meanvar = np.zeros((var.shape[0],num_clusters))\n else:\n warnings.warn('did not have correct shape for ' + str(var) + ' with len(var.shape)='+ str(len(var.shape)))\n meanvar = None\n\n for aCluster, maxInd in enumerate(maxIndices):\n # get particles in cluster\n particles = indicesToParticle[indicesOnCluster[aCluster,0:maxInd]]\n\n # compute mean depending upon size of array\n if len(var.shape) == 1:\n meanvar[aCluster] = np.mean(var[particles])\n if len(var.shape) == 2:\n meanvar[:,aCluster] = np.mean(var[:,particles], axis=1)\n\n return meanvar #}}}", "def evaluation_k_means(X_selected, n_clusters, y, n_jobs = 1):\n k_means = KMeans(n_clusters=n_clusters, init='k-means++', n_init=10, max_iter=300,\n tol=0.0001, precompute_distances=True, verbose=0,\n random_state=None, copy_x=True, n_jobs=n_jobs)\n \n k_means.fit(X_selected)\n y_predict = k_means.labels_\n \n # calculate NMI\n nmi = normalized_mutual_info_score(y, y_predict)\n \n # calculate ACC\n y_permuted_predict = best_map(y, y_predict)\n acc = accuracy_score(y, y_permuted_predict)\n \n return nmi, acc", "def specKmeans(self, n_clusters, spectralptsfile):\n self.classifier = \"Spectral-KMeans\"\n self.inptsfile = spectralptsfile \n points = self.loadPoints()\n points = points[self.validhit_bool, :]\n # points = self.randomForestTransform(points, 5, 10)\n\n print \"Running KMeans clustering on spectral data only ...\"\n points = StandardScaler(copy=False).fit_transform(points)\n mbk = MiniBatchKMeans(n_clusters=n_clusters)\n mbk.fit(points)\n self.labels[self.validhit_bool] = mbk.labels_", "def _eval_clustering(self, gen_reviews, clusters, embedding_model, clustering):\n result = []\n preds = self.predict_gen(gen_reviews, embedding_model, clustering)\n\n acc = accuracy_score(np.array(clusters), np.array(preds))\n conf = confusion_matrix(np.array(clusters), np.array(preds))\n\n return acc, conf", "def cluster_by_partitioning(active_sites):\n cls, sc = k_means(active_sites)\n\n return cls", "def evaluate(self, 
clustering):\n # Pca for each one of the clusters\n pca_mean_val = 0.;\n MAX_ELEMENTS = 1000\n for c in clustering.clusters:\n # Pick the coordinates (ensuring that we are copying them)\n element_indexes = c.all_elements\n ###################\n # Performance hack\n ###################\n # As it can be very slow for big clusters (i.e. > 3k elements) we'll compress this clusters \n # before calculating PCA. It should increase variance but will allow calculations.\n # It should use the kmedoids compressor\n if len(c.all_elements) > MAX_ELEMENTS:\n element_indexes = c.get_random_sample(MAX_ELEMENTS)\n print \"[PCA] Random sampling too big cluster to improve performance (%d elements -> %d elements).\"%(len(c.all_elements),MAX_ELEMENTS)\n ###################\n \n fitting_coordinates_of_this_cluster = self.fitting_coordinates[element_indexes]\n \n calculator = RMSDCalculator(calculatorType = \"QTRFIT_SERIAL_CALCULATOR\",\n fittingCoordsets = fitting_coordinates_of_this_cluster)\n \n if self.calculation_coordinates is not None:\n calculation_coordinates_of_this_cluster = self.calculation_coordinates[element_indexes]\n calculator = RMSDCalculator(calculatorType = \"QTRFIT_SERIAL_CALCULATOR\",\n fittingCoordsets = fitting_coordinates_of_this_cluster,\n calculationCoordsets = calculation_coordinates_of_this_cluster)\n \n # Make an iterative superposition (to get the minimum RMSD of all with respect to a mean conformation)\n calculator.iterativeSuperposition()\n\n # Calculate the covariance matrix\n if self.calculation_coordinates is None:\n covariance_matrix = PCAMetric.create_covariance_matrix(fitting_coordinates_of_this_cluster)\n else:\n covariance_matrix = PCAMetric.create_covariance_matrix(calculation_coordinates_of_this_cluster)\n \n # And then the eigenvalue we are interested in\n pca_mean_val += PCAMetric.calculate_biggest_eigenvalue(covariance_matrix)\n print \"PCA finished\"\n return pca_mean_val /clustering.total_number_of_elements", "def k_means_model(df, numOfClusters):\n # Perform scaling on the dataframe containing the selected features\n data = scale(df)\n\n # Train a model\n model = KMeans(init=\"k-means++\", n_clusters=numOfClusters, n_init=20).fit(data)\n return model", "def get_means(self):\n if self.cv_method == 'fixed':\n perf = np.mean(self.evaluations, axis=0)\n perf = np.nanmean(perf, axis=-1)\n elif self.cv_method == 'crossvalidation':\n perf = np.mean(self.evaluations, axis=0)\n perf = np.nanmean(perf, axis=-1)\n else:\n perf = self.evaluations\n while len(perf.shape) > 2:\n perf = np.nanmean(perf, axis=-1)\n perf = perf[~np.isnan(perf[:, 0])]\n perf = np.mean(perf, axis=0)\n return perf", "def _compute_centroids(self):\n\n for i in range(0, self.k):\n cluster = np.argwhere(self.assigned_clusters == i)\n cluster_points = self.data[cluster].squeeze()\n self.centroids[i] = np.mean(cluster_points, axis=0)", "def mean_cluster(self, labelled_cluster):\n sum_of_points = self.sum_cluster(labelled_cluster)\n size_cluster = len(labelled_cluster)\n if self.sigma_cl1:\n size_cluster += np.sqrt(2)*self.sigma_cl1*np.random.randn()\n mean_of_points = sum_of_points * (1.0 / size_cluster)\n return mean_of_points", "def calculate_kmeans(df, clusters=10):\r\n kmeans = KMeans(n_clusters=clusters)\r\n labels = kmeans.fit_predict(df)\r\n\r\n return kmeans, labels", "def get_cluster_centers(args=None, autoencoder=None, cluster_number=2, dataloader_list=None,\n file_path=None, save_name=None, device='cpu'):\n\n if file_path: # Load centers from file and return them on device\n print(\"Loading 
pretrained KMeans centroids\")\n centers = np.loadtxt(file_path)\n cluster_centers = torch.tensor(\n centers, dtype=torch.float, requires_grad=True).to(device)\n else: # Train Kmeans and generate centers\n # https://github.com/vlukiyanov/pt-dec/blob/11b30553858c1c146a5ee0b696c768ab5244f0ff/ptdec/model.py#L74-L92\n print(\"Training KMeans for centroids\")\n kmeans = KMeans(n_clusters=cluster_number,\n n_init=args.cluster_n_init, random_state=args.seed, max_iter=args.cluster_max_step)\n autoencoder.eval()\n features = []\n actual = []\n\n # merge dataloaders\n concat_dataset = torch.utils.data.ConcatDataset([x.dataset for x in dataloader_list])\n\n dataloader = torch.utils.data.DataLoader(\n dataset=concat_dataset,\n batch_size=args.encoder_bs\n )\n\n # form initial cluster centres\n data_iterator = tqdm(dataloader,\n leave=True,\n unit=\"batch\",\n disable=False,\n )\n print(\"Generating features for kmeans\")\n\n with torch.no_grad():\n # Loop through data and generate features from the encoder. \n for index, batch in enumerate(data_iterator):\n if (isinstance(batch, tuple) or isinstance(batch, list)) and len(batch) == 2:\n # if we have a prediction label, separate it to actual\n batch, value = batch\n actual.append(value)\n # Assuming we use the encoder from module.py\n if args.encoder_type == 'vae':\n feature = autoencoder(batch.to(device))\n elif args.encoder_type == 'resnet50':\n feature = list()\n z = autoencoder(batch.to(device)) # [:,:args.dfc_hidden_dim]\n\n feature.append(z)\n\n features.append(feature[0].detach().cpu())\n print(\"Training samples:\", len(features))\n\n actual = torch.cat(actual).long() # Save labels as long in torch tensor.\n samples = torch.cat(features)\n print(f\"Data shape {samples.shape}\")\n print(f\"Labels shape {actual.shape}\")\n print(\"Training...\")\n predicted = kmeans.fit_predict(samples.numpy(), actual) # predict centers from features.\n _, accuracy = cluster_accuracy(predicted, actual.cpu().numpy()) # Compute accuracy of predictions\n cluster_centers = kmeans.cluster_centers_ # define centers\n\n if save_name: # If param. save_name then save the centers.\n filepath = args.log_dir + save_name + \".txt\"\n if not os.path.exists(args.log_dir):\n os.mkdir(args.log_dir)\n print(\"Saving clusters to:\", filepath)\n np.savetxt(filepath, cluster_centers)\n if not (wandb.run is None): # check if wandb is running\n wandb.run.summary[f\"{save_name}_accuracy\"] = accuracy\n\n cluster_centers = torch.tensor( # Convert centers to tensor and send to device.\n cluster_centers, dtype=torch.float, requires_grad=True\n ).to(device)\n print(f\"Training KMeans completed, accuracy: {accuracy:.2f}\")\n return cluster_centers", "def evaluation(X_selected, X_test, n_clusters, y):\n k_means = KMeans(n_clusters=n_clusters, init='k-means++', n_init=10, max_iter=300,\n tol=0.0001, precompute_distances=True, verbose=0,\n random_state=None, copy_x=True, n_jobs=1)\n\n k_means.fit(X_selected)\n y_predict = k_means.predict(X_test)\n\n # calculate NMI\n nmi = normalized_mutual_info_score(y, y_predict, average_method='arithmetic')\n\n sil = silhouette_score(X_test, y_predict, metric=\"euclidean\")\n db_score = davies_bouldin_score(X_test, y_predict)\n ch_score = calinski_harabasz_score(X_test, y_predict)\n purity = calcolaPurity(y, y_predict)\n\n return nmi, sil, db_score, ch_score, purity", "def compute_clusters(addons_df, num_clusters, random_seed):\n\n # Build the stages of the pipeline. 
We need hashing to make the next\n # steps work.\n hashing_stage = HashingTF(inputCol=\"addon_ids\", outputCol=\"hashed_features\")\n idf_stage = IDF(inputCol=\"hashed_features\", outputCol=\"features\", minDocFreq=1)\n # As a future improvement, we may add a sane value for the minimum cluster size\n # to BisectingKMeans (e.g. minDivisibleClusterSize). For now, just make sure\n # to pass along the random seed if needed for tests.\n kmeans_kwargs = {\"seed\": random_seed} if random_seed else {}\n bkmeans_stage = BisectingKMeans(k=num_clusters, **kmeans_kwargs)\n pipeline = Pipeline(stages=[hashing_stage, idf_stage, bkmeans_stage])\n\n # Run the pipeline and compute the results.\n model = pipeline.fit(addons_df)\n return model.transform(addons_df).select([\"client_id\", \"prediction\"])", "def _fit(\n self,\n x,\n clusters=50,\n a=5,\n Niter=15,\n device=None,\n backend=None,\n approx=False,\n n=50,\n ):\n if type(clusters) != int:\n raise ValueError(\"Clusters must be an integer\")\n if clusters >= len(x):\n raise ValueError(\"Number of clusters must be less than length of dataset\")\n if type(a) != int:\n raise ValueError(\"Number of clusters to search over must be an integer\")\n if a > clusters:\n raise ValueError(\n \"Number of clusters to search over must be less than total number of clusters\"\n )\n if len(x.shape) != 2:\n raise ValueError(\"Input must be a 2D array\")\n if self.__normalise:\n x = x / self.tools.repeat(self.tools.norm(x, 2, -1), x.shape[1]).reshape(\n -1, x.shape[1]\n )\n\n # if we want to use the approximation in Kmeans, and our metric is angular, switch to full angular metric\n if approx and self.__metric == \"angular\":\n self.__update_metric(\"angular_full\")\n\n x = self.tools.contiguous(x)\n self.__device = device\n self.__backend = backend\n\n cl, c = self.tools.kmeans(\n x,\n self.__distance,\n clusters,\n Niter=Niter,\n device=self.__device,\n approx=approx,\n n=n,\n )\n\n self.__c = c\n cl = self.__assign(x)\n\n ncl = self.__k_argmin(c, c, k=a)\n self.__x_ranges, _, _ = cluster_ranges_centroids(x, cl)\n\n x, x_labels = self.__sort_clusters(x, cl, store_x=True)\n self.__x = x\n r = self.tools.repeat(self.tools.arange(clusters, device=self.__device), a)\n self.__keep = self.tools.to(\n self.tools.zeros([clusters, clusters], dtype=bool), self.__device\n )\n self.__keep[r, ncl.flatten()] = True\n\n return self", "def kmeans(X, n_clust):\n\n X = scale(X)\n estimator = KMeans(init = 'k-means++', n_clusters = n_clust, n_init = 10, verbose = 2)\n \n estimator.fit(X)\n labels = estimator.predict(X)\n return labels" ]
[ "0.8101285", "0.63563937", "0.6221742", "0.6118103", "0.6082124", "0.60302347", "0.5986253", "0.59648865", "0.5944979", "0.58651084", "0.5829008", "0.582223", "0.58143276", "0.5798003", "0.5790433", "0.57677114", "0.5756863", "0.5707412", "0.5699087", "0.56949383", "0.56886315", "0.56758934", "0.56720513", "0.5658921", "0.5607623", "0.5605864", "0.5599382", "0.5592022", "0.55710906", "0.55625606" ]
0.8028111
1
Concatenates all arrays with duplicated IDs. Arrays with the same IDs are stacked in chronological order. Caveat: this method is not guaranteed to preserve the order of the list.
def concat_duplicate_ids(self) -> None: # Rebuilt list instead of removing duplicated one at a time at the cost of O(n). self.data.clear() # This implementation takes advantage of the ordering of the duplicated in the __init__ method has_external_ids = set() for ext_id, items in self._external_id_to_item.items(): if not isinstance(items, list): self.data.append(items) if items.id is not None: has_external_ids.add(items.id) continue concatenated = DatapointsArray.create_from_arrays(*items) self._external_id_to_item[ext_id] = concatenated if concatenated.id is not None: has_external_ids.add(concatenated.id) self._id_to_item[concatenated.id] = concatenated self.data.append(concatenated) if not (only_ids := set(self._id_to_item) - has_external_ids): return for id_, items in self._id_to_item.items(): if id_ not in only_ids: continue if not isinstance(items, list): self.data.append(items) continue concatenated = DatapointsArray.create_from_arrays(*items) self._id_to_item[id_] = concatenated self.data.append(concatenated)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def combine_ids(ids):\r\n return hash_all(sorted(ids)) # We sort so that the id isn't sensitive to order.\r", "def flat_unique(ls):\n return list(unique(chain.from_iterable(ls), key=id))", "def remove_duplicates_array(self, array: List) -> List:\n seen = set()\n seen_add = seen.add\n return [x for x in array if not (x in seen or seen_add(x))]", "def remove_duplicates(id_list):\n id_set = set(id_list)\n id_set_dup = id_set.intersection(id_set)\n id_set_diff = id_set.symmetric_difference(id_set)\n id_set_unique = id_set_dup.union(id_set_diff)\n id_list_unique = list(id_set_unique)\n\n return id_list_unique", "def concat_all(self):\n return self.merge(1)", "def merge_uniq(*args):\n return list(set(merge(*args)))", "def _deduplicate(lst):\n out = []\n for i in lst:\n if i not in out:\n out.append(i)\n return out", "def removeDuplicatesInList(self, data):\n newDataList = []\n for i in data:\n if newDataList.count(i) == 0:\n newDataList.append(i)\n data.clear()\n data += newDataList", "def Deduplicate(items):\n seen = set()\n for it in items:\n if it not in seen:\n seen.add(it)\n yield it", "def _deduplicate(self, rids):\n # Deduplication\n filtered = []\n for rid in rids:\n for ext in self.exts[rid]:\n find_merge = False\n for exts_other in filtered:\n if self._do_merge(ext, exts_other):\n exts_other.append(ext)\n find_merge = True\n break\n if not find_merge:\n filtered.append([ext])\n # Update attr/pol information for each group\n groups = []\n for exts in filtered:\n att = self._find_majority([ext.att for ext in exts])\n pol = self._find_majority([ext.pol for ext in exts])\n groups.append({\"exts\": exts, \"att\": att, \"pol\": pol})\n return groups", "def concat_old_ids(old, new):\n\n ids = [x for x in new.columns if 'ID' in x]\n\n for i, row in new.iterrows():\n info = pd.DataFrame()\n for c in ids:\n if row[c].find(',') != -1:\n for sp in row[c].split(' , '):\n info = info.append(old.loc[(old.Phase == row.Phase) & (old[c] == sp)])\n for col in info.columns:\n if col == 'Page' and len(pd.unique(info[col])) > 1:\n info.loc[:, col] = 'Varies'\n if '#' not in col and 'Description' not in col:\n info.loc[:, col] = ' , '.join([t for t in sorted(pd.unique(info[col])) if t])\n elif '#' in col:\n info.loc[:, col] = info.loc[:,col].sum()\n info = info.drop_duplicates()\n info.index = range(len(info))\n if not info.empty:\n for sp in row[c].split(' , '):\n old.loc[(old.Phase == row.Phase) & (old[c] == sp)] = info.loc[0].tolist()\n old = old.drop_duplicates()\n return old", "def __group_alt_atoms__(self, atoms):\n def ordering_key(atoms):\n return atoms[0].alt_id\n alt_ids = coll.defaultdict(list)\n for atom in atoms:\n alt_ids[atom.alt_id].append(atom)\n\n if len(alt_ids) == 1:\n return list(alt_ids.values())\n\n if None in alt_ids:\n common = alt_ids.pop(None)\n for alt_id, specific_atoms in list(alt_ids.items()):\n for common_atom in common:\n copied = copy.deepcopy(common_atom)\n copied.alt_id = alt_id\n specific_atoms.append(copied)\n\n return sorted(list(alt_ids.values()), key=ordering_key)", "def remove_duplicates_for_fetch(items: list, last_fetched_ids: list) -> list:\n return [\n item\n for item in items\n if item.get('id') and item.get('id') not in last_fetched_ids\n ]", "def seqs_to_toks(self,id_array,remove_pads=True):\n return [self.ids_to_toks(seq,remove_pads)\n for seq in id_array]", "def _get_dups_recursively(self, ticket_id):\n res = []\n\n ids = self._get_dups(ticket_id)\n for i in ids:\n res.append(i)\n res.extend(self._get_dups_recursively(i))\n\n return res", "def 
remove_duplicates(a):\n b = np.ascontiguousarray(a).view(np.dtype((np.void, a.dtype.itemsize * a.shape[1])))\n dedup = np.unique(b).view(a.dtype).reshape(-1, a.shape[1])\n return dedup", "def _MergeRecords(self, records, stored_records):\n if not stored_records:\n return\n new_ids = {r.id for r in records}\n records.extend(r for r in stored_records\n if getattr(r, 'id', None) not in new_ids)", "def array_merge(a1, a2, inplace=False, empty_source=False): \n if inplace:\n out = a1\n else:\n out = copy.deepcopy(a1)\n if empty_source:\n for i in range(len(out)):\n out.pop()\n for k in a2:\n out[k] = a2[k]\n return out", "def _check_all_different_ids(das):\n ids = np.zeros(len(das), dtype=np.int64)\n ids_val = np.zeros(len(das), dtype=np.int64)\n for j, da in enumerate(das):\n ids[j] = id(da)\n ids_val[j] = id(da.values)\n\n if len(ids) != len(np.unique(ids)):\n # DataArrays not unique! - find first duplicate and report error\n das = list(das)\n u, c = np.unique(ids, return_counts=True)\n dups = u[c > 1]\n for dup in dups:\n jj = np.where(ids == dup)[0]\n Dataset._id_of_DataArrays_equal(das[jj[0]], das[jj[1]])\n if len(ids_val) != len(np.unique(ids_val)):\n # DataArray *values* not unique! - find first duplicate and report error\n das = list(das)\n u, c = np.unique(ids_val, return_counts=True)\n dups = u[c > 1]\n for dup in dups:\n jj = np.where(ids_val == dup)[0]\n Dataset._id_of_DataArrays_equal(das[jj[0]], das[jj[1]])", "def concat_without_duplicates(dfs):\n temp_dfs = []\n for temp_df in dfs:\n # Joining the different dfs resulted in a df with more rows. This is why\n # I do this. More info on https://stackoverflow.com/a/34297689/5031446\n # This removes rows with duplicated indexes and keeps just the last observation\n temp_df = temp_df[~temp_df.index.duplicated(keep='last')]\n temp_dfs.append(temp_df)\n result = pd.concat(temp_dfs, axis=1)\n\n return result", "def chain_unique(*iterables):\n seen = set()\n for element in iterables:\n for item in element:\n k = item.id\n if k not in seen:\n seen.add(k)\n yield item", "def remove_duplicate_items(cls, items_in, prior_batch_ids):\n items_out = []\n item_ids = set(prior_batch_ids)\n for item in items_in:\n if item[\"id\"] not in item_ids:\n item_ids.add(item[\"id\"])\n items_out.append(item)\n else:\n continue\n return items_out", "def cat_arrays(arr1, arr2):\n res = []\n for i in arr1:\n res.append(i)\n for j in arr2:\n res.append(j)\n return (res)", "def merge(datasets: Sequence[\"Dataset\"]) -> \"Dataset\":\n ds = datasets[0].copy()\n for dsj in datasets[1:]:\n ds = ds._append_items(dsj, copy=False)\n\n return ds", "def deduped(items):\n \n return list(set(items))", "def cat_arrays(arr1, arr2):\n return [x for x in arr1+arr2]", "def combine_games(all_data):\n\tcombined_fields = concat_data_fields(all_data)\n\tcombined_data = pd.DataFrame(columns=combined_fields)\n\tgids = np.unique(all_data['Id'])\n\tfor gid in gids:\n\t\tgames = all_data[all_data['Id'] == gid]\n\t\tassert games.shape[0] == 2, \"Should have found 2 games here\"\n\t\tconcat_games = concat_game_rows(games, combined_fields)\n\t\tcombined_data = combined_data.append(pd.Series(concat_games[0]), ignore_index=True)\n\t\tcombined_data = combined_data.append(pd.Series(concat_games[1]), ignore_index=True)\n\treturn combined_data", "def remove_duplicates_in_items(items: list, id_key: str) -> list:\n ids = {}\n new_items = []\n for item in items:\n item_id = item.get(id_key)\n if item_id not in ids:\n ids[item_id] = True\n new_items.append(item)\n\n return new_items", "def 
dedupe_list(input):\n return list(set(input))", "def remove_duplicates(lst):\n (els, inds) = np.unique(lst, return_index=True)\n out = np.zeros(lst.shape, dtype=lst.dtype)\n out[inds] = els\n return out" ]
[ "0.6108058", "0.6034065", "0.56852865", "0.5684234", "0.5662172", "0.5658944", "0.5629316", "0.5507034", "0.5439302", "0.54389274", "0.5396021", "0.53948885", "0.53855747", "0.538059", "0.53791827", "0.53729856", "0.5346434", "0.53187525", "0.52948135", "0.5288018", "0.5275198", "0.52678084", "0.5257759", "0.5255326", "0.52422917", "0.5189013", "0.51872844", "0.51785123", "0.5171465", "0.51618713" ]
0.7894388
0
Get a specific DatapointsArray from this list by id or external_id.
def get( # type: ignore [override] self, id: Optional[int] = None, external_id: Optional[str] = None, ) -> Union[None, DatapointsArray, List[DatapointsArray]]: # TODO: Question, can we type annotate without specifying the function? return super().get(id, external_id) # type: ignore [return-value]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get( # type: ignore [override]\n self,\n id: Optional[int] = None,\n external_id: Optional[str] = None,\n ) -> Union[None, Datapoints, List[Datapoints]]:\n # TODO: Question, can we type annotate without specifying the function?\n return super().get(id, external_id) # type: ignore [return-value]", "def get_by_id(self, id: str) -> \"Dataset\":\n raise NotImplementedError", "def get(id: str) -> DataSet:\n pass", "def get_data_by_id(data_id):\n return Data.get_by_id(data_id)", "def get_dataset(self, name, multi_instance=0):\n return [elem for elem in self._data_list\n if elem.name == name and elem.multi_id == multi_instance][0]", "def _point_array(self, objects, unique_id):\n points = []\n ids = []\n for idx, row in tqdm(objects.iterrows(), total=objects.shape[0]):\n if row[\"geometry\"].type in [\"Polygon\", \"MultiPolygon\"]:\n poly_ext = row[\"geometry\"].boundary\n else:\n poly_ext = row[\"geometry\"]\n if poly_ext is not None:\n if poly_ext.type == \"MultiLineString\":\n for line in poly_ext:\n point_coords = line.coords\n row_array = np.array(point_coords[:-1]).tolist()\n for i, a in enumerate(row_array):\n points.append(row_array[i])\n ids.append(row[unique_id])\n elif poly_ext.type == \"LineString\":\n point_coords = poly_ext.coords\n row_array = np.array(point_coords[:-1]).tolist()\n for i, a in enumerate(row_array):\n points.append(row_array[i])\n ids.append(row[unique_id])\n else:\n raise Exception(\"Boundary type is {}\".format(poly_ext.type))\n return points, ids", "def getbyid(self, id):\n\n return esd.retrieve(id)", "def one(self, did: int, daterange: Tuple[dt, dt]):\n try:\n with self.data.app.app_context():\n from app.models import Device as MD\n device = (MD.query.\n filter(MD.device_id == did).first())\n dn = device_check(device.device_name, DataSource.JIANYANYUAN)\n except AttributeError:\n logger.warning(\"[JianYanYuanData] fetch spot_record failed, \"\n + \"device is not in database\")\n return iter([])\n except WrongDidException:\n logger.warning('[Jianyanyuan] fetch spot_record failed, '\n + 'device not in database')\n return iter([])\n\n # get one from the list.\n device_res = [d for d in self.device_list\n if d.get(\"deviceId\") == dn].pop()\n\n param = self._make_datapoint_param(device_res, daterange)\n if param is None:\n return iter([])\n\n return self._gen([param])", "def __getitem__(cls, data_id):\n\n if data_id in cls._data:\n return cls._data[data_id]\n\n raise KeyError(f'Global data ID \"{data_id}\" not defined.')", "def get_id(self, ID):\n for i in range(self.data.shape[0]):\n if self.data[i][DATA_ID_INDEX] == ID:\n return self.data[i]", "def _dense_point_array(self, geoms, distance, index):\n # interpolate lines to represent them as points for Voronoi\n points = []\n ids = []\n\n if pygeos.get_type_id(geoms[0]) not in [1, 2, 5]:\n lines = pygeos.boundary(geoms)\n else:\n lines = geoms\n lengths = pygeos.length(lines)\n for ix, line, length in zip(index, lines, lengths):\n if length > distance: # some polygons might have collapsed\n pts = pygeos.line_interpolate_point(\n line,\n np.linspace(0.1, length - 0.1, num=int((length - 0.1) // distance)),\n ) # .1 offset to keep a gap between two segments\n points.append(pygeos.get_coordinates(pts))\n ids += [ix] * len(pts)\n\n points = np.vstack(points)\n\n return points, ids\n\n # here we might also want to append original coordinates of each line\n # to get a higher precision on the corners", "def getitem_array(self, key):\n if isinstance(key, type(self)):\n key = key.to_pandas().squeeze(axis=1)\n\n def 
getitem_array(df, key):\n return df[key]\n\n return DataFrameDefault.register(getitem_array)(self, key)", "def get_item_by_id(self, id):\n results = self.table_connector.query(\n KeyConditionExpression=Key(self.primary_key).eq(id)\n )\n return results[\"Items\"][0] if \"Items\" in results else []", "def at(cls, _id):\n return cls.where(cls.primarykey == _id)", "def get_employee(self, id):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT * FROM employee WHERE employeeID=%s ', (id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])", "def soleDataPoint(self):\n dps = self.datapoints()\n if dps:\n return dps[0]", "def get_coords(data, id):\n return data[id]['lat'], data[id]['lon']", "def __getitem__(self, index):\n\n if self._data_indices is not None:\n index = self._data_indices[index]\n data = self._dataset[index]\n return data", "def get_data(self, line_id):\n # check\n if line_id not in self._lineDict:\n raise KeyError('Line ID %s does not exist.' % str(line_id))\n\n # get line\n line = self._lineDict[line_id]\n if line is None:\n raise RuntimeError('Line ID %s has been removed.' % line_id)\n\n return line.get_xdata(), line.get_ydata()", "def get(self, id):\n resultado = EmployeeModel.query.filter_by(employee_id=id).first()\n if resultado:\n return resultado\n api.abort(404)", "def get_data_source_by_user(self, username: str = None, id: int = None):\n all_data_sources_from_user_array = []\n user = None\n try:\n try:\n if username is not None:\n user = dict_to_model(\n User, UserService.get_user_by_username(self, username))\n elif id is not None:\n user = dict_to_model(User,\n UserService.get_user_by_id(self, id))\n except Exception:\n raise\n\n if user is not None:\n for data_source in DataSource.select(\n DataSource, user).where(DataSource.user == user):\n all_data_sources_from_user_array.append(\n model_to_dict(data_source))\n return all_data_sources_from_user_array\n except Exception:\n raise", "def read_by_id(self, id, fields=None):\n assert id is not None, \"id can not be None\"\n return self.read_many_by_id([id], fields)[0]", "def _get_data(self, species_id, fit_id):\n return self.raw_results[fit_id][species_id]", "def get_by_id(oai_data_id):\n try:\n return OaiData.objects.get(pk=str(oai_data_id))\n except ObjectDoesNotExist as exception:\n raise exceptions.DoesNotExist(str(exception))\n except Exception as ex:\n raise exceptions.ModelError(str(ex))", "def get(self, id):\n try:\n query_parameters = request.args\n logging.debug(f\" query params - {str(query_parameters)}\")\n obs = Observations.find_observation_by_observation_id(id)\n\n top, skip, expand_code, selects = parse_args(query_parameters)\n if obs:\n ds_list = Datastreams.filter_by_id(\n obs.datastream_id, expand_code, selects\n )\n response = jsonify(ds_list)\n\n else:\n response = jsonify({\"message\": \"No Observations with given Id found\"})\n response.status_code = 200\n except Exception as e:\n logging.warning(e)\n response = jsonify({\"message\": \"error\"})\n response.status_code = 400\n return response\n\n finally:\n return response", "def data_setup_datapoints():\n datapoints_list = []\n datapoints_list.append(helper_datapoints(\"datapoints1\", [(10, 0), (5, 1), (15, 2), (5, 3)]).get_id())\n datapoints_list.append(helper_datapoints(\"datapoints2\", [(1, 0), (2, 0.5), (3, 1.0), (4, 1.5)]).get_id())\n return datapoints_list", "def getSpectraDataForOneEvent(self, event_id=1, particleName=\"pion\", 
pT_range=None, where=\"\", orderBy=\"pT\"):\n whereClause = \"event_id=%d and pid=%d\" % (event_id, self._pid(particleName))\n if pT_range:\n whereClause += \" and %g<=pT and pT<=%g\" % (pT_range[0], pT_range[1])\n if where:\n whereClause += \" and \" + where\n return np.asarray(self.db.selectFromTable(\"spectra\", (\"pT\", \"N\"), whereClause=whereClause, orderByClause=orderBy))", "def FindElementById(self, id):\r\n for element in self.__listOfElements:\r\n if element.get_studentID() == id:\r\n return element\r\n raise RepositoryError(\"Inexisting Element\")", "def get_employee_by_id(employee_id):\n where = Employee.id == employee_id\n query = get_employee_query(where)\n return query.one()", "def select_by_id(parcel_id):\n sql = \"SELECT * FROM dostawy.przesylki WHERE przesylka_ID = %s;\"\n val = (parcel_id,)\n rows = DBconnector.fetch_query_parameters(sql, val)\n return _wrap_in_parcel_list(rows)" ]
[ "0.6054171", "0.5667537", "0.54831964", "0.54567707", "0.53983647", "0.5365511", "0.5321282", "0.51933724", "0.5130589", "0.5129982", "0.5032065", "0.50203216", "0.5008644", "0.49614123", "0.4957775", "0.49143854", "0.48843566", "0.48525918", "0.4837254", "0.48273316", "0.48233312", "0.4818455", "0.48157564", "0.47673935", "0.4758005", "0.4743228", "0.47357374", "0.47281295", "0.47138935", "0.46934974" ]
0.6824546
0
Get a specific Datapoints from this list by id or external_id.
def get( # type: ignore [override] self, id: Optional[int] = None, external_id: Optional[str] = None, ) -> Union[None, Datapoints, List[Datapoints]]: # TODO: Question, can we type annotate without specifying the function? return super().get(id, external_id) # type: ignore [return-value]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get( # type: ignore [override]\n self,\n id: Optional[int] = None,\n external_id: Optional[str] = None,\n ) -> Union[None, DatapointsArray, List[DatapointsArray]]:\n # TODO: Question, can we type annotate without specifying the function?\n return super().get(id, external_id) # type: ignore [return-value]", "def get(id: str) -> DataSet:\n pass", "def get_by_id(self, id: str) -> \"Dataset\":\n raise NotImplementedError", "def getbyid(self, id):\n\n return esd.retrieve(id)", "def get_data_by_id(data_id):\n return Data.get_by_id(data_id)", "def get(self, id):\n resultado = EmployeeModel.query.filter_by(employee_id=id).first()\n if resultado:\n return resultado\n api.abort(404)", "def get(self, id):\n try:\n query_parameters = request.args\n logging.debug(f\" query params - {str(query_parameters)}\")\n obs = Observations.find_observation_by_observation_id(id)\n\n top, skip, expand_code, selects = parse_args(query_parameters)\n if obs:\n ds_list = Datastreams.filter_by_id(\n obs.datastream_id, expand_code, selects\n )\n response = jsonify(ds_list)\n\n else:\n response = jsonify({\"message\": \"No Observations with given Id found\"})\n response.status_code = 200\n except Exception as e:\n logging.warning(e)\n response = jsonify({\"message\": \"error\"})\n response.status_code = 400\n return response\n\n finally:\n return response", "def get_employee(self, id):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT * FROM employee WHERE employeeID=%s ', (id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])", "def lookup(cls, id: int):\n record = query_db(\n \"select id, amount, description, user_id from expenses where id = ?\",\n [id],\n one=True,\n )\n if record is None:\n raise NotFound()\n return cls(**record)", "def at(cls, _id):\n return cls.where(cls.primarykey == _id)", "def one(self, did: int, daterange: Tuple[dt, dt]):\n try:\n with self.data.app.app_context():\n from app.models import Device as MD\n device = (MD.query.\n filter(MD.device_id == did).first())\n dn = device_check(device.device_name, DataSource.JIANYANYUAN)\n except AttributeError:\n logger.warning(\"[JianYanYuanData] fetch spot_record failed, \"\n + \"device is not in database\")\n return iter([])\n except WrongDidException:\n logger.warning('[Jianyanyuan] fetch spot_record failed, '\n + 'device not in database')\n return iter([])\n\n # get one from the list.\n device_res = [d for d in self.device_list\n if d.get(\"deviceId\") == dn].pop()\n\n param = self._make_datapoint_param(device_res, daterange)\n if param is None:\n return iter([])\n\n return self._gen([param])", "def get_by_id(oai_data_id):\n try:\n return OaiData.objects.get(pk=str(oai_data_id))\n except ObjectDoesNotExist as exception:\n raise exceptions.DoesNotExist(str(exception))\n except Exception as ex:\n raise exceptions.ModelError(str(ex))", "def get_by_id(self, expense_id):\n return self._get_request({}, Expenses.GET_EXPENSE_BY_ID.format(expense_id))", "def get(self, id):\n try:\n query_parameters = request.args\n logging.debug(f\" query params - {query_parameters}\")\n top, skip, expand_code, selects = parse_args(query_parameters)\n\n ds_list = Datastreams.filter_by_sensor_id(\n id, top, skip, expand_code, selects\n )\n response = jsonify(ds_list)\n response.status_code = 200\n except Exception as e:\n logging.warning(e)\n response = jsonify({\"message\": \"error\"})\n response.status_code = 400\n return response\n\n finally:\n return 
response", "def get_by_id(self, id):\n return Entry.all().filter('entry_id = ', id).get()", "def __getitem__(cls, data_id):\n\n if data_id in cls._data:\n return cls._data[data_id]\n\n raise KeyError(f'Global data ID \"{data_id}\" not defined.')", "def get(self, id):\r\n \r\n cursor = self._conn.cursor()\r\n query = 'SELECT d.ID id, d.Name name, d.ClassType type, d.Rescue rescue, CONCAT(o.FirstName, \" \", o.LastName) operator '\r\n query += 'FROM DroneStore d LEFT JOIN OperatorStore o ON d.Operators = o.ID WHERE d.ID = ' + str(id)\r\n cursor.execute(query)\r\n for (id, name, type, rescue, operator) in cursor:\r\n drone = Drone(name, type, rescue)\r\n drone.id = id\r\n drone.operator = operator\r\n return drone\r\n cursor.close()", "def get(self,id):\r\n person = get_one_by_persons_id(id=id)\r\n if not person:\r\n api.abort(404)\r\n else:\r\n return person", "def get_by_id(self, id: int):\n\n\t\traise NotImplemented", "def get_employee(self, employee_id):\n cursor = self.dbconnect.get_cursor()\n\n try:\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE LOWER(id)=LOWER(%s)', (employee_id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n\n except:\n self.dbconnect.rollback()\n raise", "def get_dataset(self, name, multi_instance=0):\n return [elem for elem in self._data_list\n if elem.name == name and elem.multi_id == multi_instance][0]", "def get_employee_by_id(employee_id):\n where = Employee.id == employee_id\n query = get_employee_query(where)\n return query.one()", "def get(self, id):\n try:\n query_parameters = request.args\n logging.debug(f\" query params - {query_parameters}\")\n top, skip, expand_code, selects = parse_args(query_parameters)\n\n ds_list = Datastreams.filter_by_thing_id(\n id, top, skip, expand_code, selects\n )\n response = jsonify(ds_list)\n response.status_code = 200\n except Exception as e:\n logging.warning(e)\n response = jsonify({\"message\": \"error\"})\n response.status_code = 400\n return response\n\n finally:\n return response", "def find(self, id, columns=None):\n if not columns:\n columns = ['*']\n\n return self.where('id', '=', id).first(1, columns)", "def find_by_id(id: int):\n exercise = Exercise.try_find_by_id(id)\n if not exercise:\n raise NotFound(EXERCISE_NOT_FOUND_MSG)\n return exercise", "def read_by_id(self, id, fields=None):\n assert id is not None, \"id can not be None\"\n return self.read_many_by_id([id], fields)[0]", "def get_entity_by_id(self, id):\n # url = '{}/ngsi-ld/v1/entities?type={}&offset={}&limit={}'.format(self.url, type, offset, limit)\n url = '{}/ngsi-ld/v1/entities/{}'.format(self.url, id)\n r = requests.get(url, headers=self.headers_ld)\n return r.json()", "def get(self, expense_id):\n url = base_url + expense_id\n resp = zoho_http_client.get(url, self.details, self.headers)\n return parser.get_expense(resp)", "def series_data(series_id, **kwargs):\n\n instance = Ceic._get_instance()\n\n kwargs[\"id\"] = series_id\n get_series_method = instance._series_facade.get_series_data\n result = instance._make_request(get_series_method, **kwargs)\n\n return result", "def get_item_by_id(self, id):\n results = self.table_connector.query(\n KeyConditionExpression=Key(self.primary_key).eq(id)\n )\n return results[\"Items\"][0] if \"Items\" in results else []" ]
[ "0.61743337", "0.59749645", "0.5939584", "0.58991456", "0.57974106", "0.5651259", "0.55687577", "0.55102324", "0.54533166", "0.5403147", "0.53751576", "0.53591925", "0.53410923", "0.53159577", "0.52451044", "0.5237156", "0.5236404", "0.52126646", "0.516919", "0.5158489", "0.51489794", "0.51396257", "0.51162547", "0.50761807", "0.50590307", "0.5054923", "0.5049051", "0.5026392", "0.5012345", "0.501208" ]
0.6380494
0
Paillier encryption of an Int64 plaintext. Paillier homomorphic addition only directly adds positive values; however, we would like to add both positive and negative values (i.e. int64 is signed). To achieve this, we represent negative values in two's complement form. Also, in order to detect overflow after adding multiple values, the sign bit of the 64-bit value is extended (replicated) all the way to the 96th bit, and all bits above the 96th are zero.
def EncryptInt64(self, plaintext, r_value=None):
    if not isinstance(plaintext, int) and not isinstance(plaintext, long):
        raise ValueError('Expected int or long plaintext but got: %s' %
                         type(plaintext))
    if plaintext < MIN_INT64 or plaintext > MAX_INT64:
        raise ValueError('Int64 values need to be between %d and %d but got %d'
                         % (MIN_INT64, MAX_INT64, plaintext))
    plaintext = self._Extend64bitTo96bitTwosComplement(plaintext)
    return self.Encrypt(plaintext, r_value=r_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _raw_mul(self, plaintext):\n if not isinstance(plaintext, int) and not isinstance(plaintext, type(mpz(1))) and not isinstance(plaintext, numpy.int64):\n raise TypeError('Expected ciphertext to be int, not %s' %\n type(plaintext))\n\n if plaintext < 0 or plaintext >= self.public_key.n:\n raise ValueError('Scalar out of bounds: %i' % plaintext)\n\n if self.public_key.n - self.public_key.max_int <= plaintext:\n # Very large plaintext, play a sneaky trick using inverses\n neg_c = invert(self.ciphertext(False), self.public_key.nsquare)\n neg_scalar = self.public_key.n - plaintext\n return powmod(neg_c, neg_scalar, self.public_key.nsquare)\n else:\n return powmod(self.ciphertext(False), plaintext, self.public_key.nsquare)", "def encrypt(x, P, S):\n xL, xR = split64(x)\n xL, xR = feistel(xL, xR, P, S)\n return merge64(xL, xR)", "def encode_i64(value: int) -> bytes:\n return int_to_le_bytes(value, NUMERIC_CONSTRAINTS[CLTypeKey.I64].LENGTH, True)", "def _enc(x: int) -> float:\n return 2 + x + (29 / (x ** 2 + (1 - x) ** 2))", "def encode_u64(value: int) -> bytes:\n return int_to_le_bytes(value, NUMERIC_CONSTRAINTS[CLTypeKey.U64].LENGTH, False)", "def encrypt(self,num):\n assert len(bin(num)) <= len(bin(self.kl.n))-self.s\n return itob64(pow(random.randint(0,(1<<self.s)-1)+(num<<self.s),self.kl.e,self.kl.n))", "def Add(self, ciphertext1, ciphertext2):\n for c in (ciphertext1, ciphertext2):\n if not isinstance(c, int) and not isinstance(c, long):\n raise ValueError('Expected int or long type for %s but got %s' %\n (c, type(c)))\n return ciphertext1 * ciphertext2 % self.nsquare", "def bigint_encode(val):\n val = operator.index(val)\n sz = (val.bit_length() + 7) // 8\n if val < 0:\n hdr = HDR_BIAS - sz\n rval = -val ^ ((1 << (sz * 8)) - 1)\n else:\n hdr = HDR_BIAS + sz\n rval = val\n return int_to_bytes(hdr, HDR_LEN, 'big') + int_to_bytes(rval, sz, 'big')", "def add_int64(self, value):\n self._check_int_type(value, _INT_8BYTE_UPPERLIMIT)\n self._data += value.to_bytes(8, byteorder=\"little\")", "def EncryptMultipleInt64s(self, numberlist, r_value=None):\n plaintext = 0\n number_counter = 0\n if len(numberlist) > PACKING_LIMIT:\n raise ValueError('The number of entries in the input list cannot be'\n + ' more than %d' % (PACKING_LIMIT))\n for entry in numberlist:\n if not isinstance(entry, int) and not isinstance(entry, long):\n raise ValueError('Expected int or long but got: %s' % type(number))\n if entry < MIN_INT64 or entry > MAX_INT64:\n raise ValueError('Int64 values need to be between %d and %d but got %d'\n % (MIN_INT64, MAX_INT64, entry))\n entry = self._Extend64bitTo96bitTwosComplement(entry)\n if number_counter > 0:\n plaintext <<= PACKING_BIT_SIZE\n plaintext += entry\n number_counter += 1\n return self.Encrypt(plaintext, r_value=r_value)", "def enc_add(pub, m1, m2):\n add_result = m1 * m2 % pub.n_sq\n return add_result", "def __mul__(self, other):\n if isinstance(other, EncryptedNumber):\n raise NotImplementedError('Good luck with that...')\n if other < 0:\n other = other + self.public_key.n\n product = self._raw_mul(other)\n\n return EncryptedNumber(self.public_key, product)", "def add_no_carry(*args):\r\n num_digits = []\r\n\r\n for arg in args:\r\n num_digits.append(len(str(arg)))\r\n\r\n max_digits = max(num_digits)\r\n # list comprehension way\r\n # max_digits = max([len(str(arg)) for arg in args])\r\n final_sum = 0\r\n\r\n for pwr in range(1, max_digits + 1): # iterate through ea decimal\r\n result_no_carry = 0\r\n for arg in args:\r\n if len(str(arg)) >= pwr:\r\n # modulus sets 
the current decimal as the most significant\r\n # decimal\r\n # floor div selects the most significant decimal\r\n result_no_carry += arg % 10**pwr // 10**(pwr - 1)\r\n\r\n # list comprehension way\r\n # result_no_carry = sum([arg % 10**pwr // 10**(pwr - 1) for arg in args if len(str(arg)) >= pwr])\r\n\r\n # final_sum = str(result_no_carry % 10) + final_sum\r\n final_sum += result_no_carry % 10\r\n\r\n return int(final_sum)", "def Encrypt(self, plaintext, r_value=None):\n\n if not isinstance(plaintext, int) and not isinstance(plaintext, long):\n raise ValueError('Expected int or long type plaintext but got: %s' %\n type(plaintext))\n r = r_value or self._GetRandomFromZNStar(N_LENGTH, self.n)\n return (ModExp(self.g, plaintext, self.nsquare) *\n ModExp(r, self.n, self.nsquare)) % self.nsquare", "def _convert_int_to_i64(val):\n if val > 0x7FFFFFFFFFFFFFFF:\n val -= 0x10000000000000000\n return val", "def __lshift__(self, other):\r\n # TODO: extend to secret offset\r\n if not isinstance(other, int):\r\n return NotImplemented\r\n\r\n return runtime.mul(self, 1<<other)", "def add64(a,b):\n return(np.add(a, b, dtype=np.uint64))", "def EncryptFloat(self, plaintext, r_value=None):\n if not isinstance(plaintext, float):\n raise ValueError('Expected float plaintext but got: %s' % type(plaintext))\n\n input_as_long = struct.unpack('Q', struct.pack('d', plaintext))[0]\n mantissa = (input_as_long & 0xfffffffffffff) | 0x10000000000000\n exponent = ((input_as_long >> 52) & 0x7ff) - EXPONENT_BIAS\n sign = input_as_long >> (EXPLICIT_MANTISSA_BITS + EXPONENT_BITS)\n if IsNan(plaintext):\n # Put a 1 in the 32 bit nan indicator field.\n plaintext = 0x00000001 << FLOAT_NAN_LSB # << 991\n elif IsInfPlus(plaintext):\n # Put a 1 in the 32 bit plus inf indicator field.\n plaintext = 0x00000001 << FLOAT_PLUSINF_LSB # << 959\n elif IsInfMinus(plaintext):\n # Put a 1 in the 32 bit minus inf indicator field.\n plaintext = 0x00000001 << FLOAT_MINUSINF_LSB # << 927\n elif exponent == 0 and mantissa == 0: # explicit 0\n plaintext = 0\n elif exponent > FLOAT_MANTISSA_ZERO: # > 389\n # Can't represent such large numbers\n raise ValueError('Floats with exponents larger than 389 are currently '\n 'not suppported.')\n elif exponent < -FLOAT_MANTISSA_ZERO - EXPLICIT_MANTISSA_BITS: # < -389 -52\n # too small, set to zero\n plaintext = 0\n else: # representable numbers with -441 <= exponent <= 389.\n # Place 53 bit mantissa (1 + 52 explicit bit mantissa in 831 bit payload\n # and shift according to exponent.\n # - first put 53 bit mantissa on the left most side of payload\n plaintext = mantissa << FLOAT_MANTISSA_LSB # << 778\n # - second shift right as needed.\n plaintext >>= (FLOAT_MANTISSA_ZERO - exponent) # >>= (389 - exponent)\n # Find 2s complement if number is negative\n if sign == 1: # neg number\n # make 895 bit (831 + 64 extended sign bits) 2s complement\n plaintext = (plaintext ^ _ONES_CARRYOVER_LSB) + 1L\n return self.Encrypt(plaintext, r_value=r_value)", "def calculateCrypt(asci: int, e: int, n: int) -> int:\n return pow(int(asci),e,n)", "def _add_scalar(self, scalar):\n\n a, b = self, scalar\n\n # Don't bother to salt/obfuscate in a basic operation, do it\n # just before leaving the computer.\n encrypted_scalar = a.public_key.raw_encrypt(b, 1)\n\n sum_ciphertext = a._raw_add(a.ciphertext(False), encrypted_scalar)\n return EncryptedNumber(a.public_key, sum_ciphertext)", "def _one_sign(item, seed):\n return 1 if (xxh64(item, seed=seed).intdigest() & 1) else -1", "def encode(key: T) -> int:\n \n if isinstance(key, 
str):\n result: int = 0\n p: int = 97 # p should roughly equal the number of characters in the input alphabet, we have 95 printable ASII chars\n m: int = 32361122672259149 # now that's a prime :), 19th in OEIS A118839\n p_pow: int = 1\n for c in key:\n result = (result + ord(c) * p_pow) % m\n p_pow = (p_pow * p) % m\n return result\n elif isinstance(key, int):\n return key\n else:\n raise Exception(f\"Cannot encode {type(key)} (Only strings and integers are supported)\")", "def test_symmetry_negative_int(self):\n for x in range(1000):\n random_int = random.randint(-1 * sys.maxsize - 1, 0)\n encoded_int = base62.from_decimal(random_int)\n self.assertEqual(random_int, base62.to_decimal(encoded_int))", "def _raw_add(self, e_a, e_b):\n return e_a * e_b % self.public_key.nsquare", "def padlen_64(x: int):\n return (64 - (x % 64)) % 64", "def __pow__(self, other):\r\n # TODO: extend to secret exponent\r\n if not isinstance(other, int):\r\n return NotImplemented\r\n\r\n return runtime.pow(self, other)", "def test_bit_add_overflow_wrap(self):\n ops = [bitwise_operations.bit_add(self.five_255_bin, 0, 8, 1, False, aerospike.BIT_OVERFLOW_WRAP, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([0] * 1 + [255] * 4)\n assert bins[self.five_255_bin] == expected_result", "def Affine(self, ciphertext, a=1, b=0):\n # This works for raw paillier payload but may not for int64/float payload.\n # First multiply ciphertext with a\n a_mult_ciphertext = pow(ciphertext, a, self.nsquare)\n # Add b to it.\n return a_mult_ciphertext * pow(self.g, b, self.nsquare) % self.nsquare", "def test_symmetry_positive_int(self):\n for x in range(1000):\n random_int = random.randint(0, sys.maxsize)\n encoded_int = base62.from_decimal(random_int)\n self.assertEqual(random_int, base62.to_decimal(encoded_int))", "def raw_decrypt(self, ciphertext):\n if not isinstance(ciphertext, int) and not isinstance(ciphertext, type(mpz(1))) and not isinstance(scalar, numpy.int64):\n raise TypeError('Expected ciphertext to be an int, not: %s' %\n type(ciphertext))\n\n decrypt_to_p = self.l_function(powmod(ciphertext, self.p-1, self.psquare), self.p) * self.hp % self.p\n decrypt_to_q = self.l_function(powmod(ciphertext, self.q-1, self.qsquare), self.q) * self.hq % self.q\n value = self.crt(decrypt_to_p, decrypt_to_q)\n if value < self.n/3:\n return value\n else:\n return value - self.n" ]
[ "0.5790192", "0.576911", "0.56213176", "0.54513925", "0.5386436", "0.53785866", "0.5376329", "0.5372119", "0.5359393", "0.53206307", "0.5317017", "0.53136617", "0.5303635", "0.5297727", "0.52348304", "0.52161306", "0.51484716", "0.51110023", "0.508157", "0.50726026", "0.5064032", "0.501759", "0.50032854", "0.49988627", "0.49681213", "0.49381325", "0.4937158", "0.4916332", "0.48983175", "0.4897127" ]
0.652218
0
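The sign-extension helpers referenced in the record above (_Extend64bitTo96bitTwosComplement and _Unwrap96bitTo64bit) are not included in the document field. The following is a minimal, hypothetical sketch of the widening/narrowing the query describes, assuming plain two's complement semantics; the actual helpers may differ.

MIN_INT64 = -(1 << 63)
MAX_INT64 = (1 << 63) - 1
_ONES_96 = (1 << 96) - 1

def extend_64bit_to_96bit_twos_complement(value):
    # Masking a Python int with a 96-bit mask yields its 96-bit two's
    # complement pattern: negative values get the sign bit replicated up to
    # bit 95, non-negative values pass through unchanged.
    assert MIN_INT64 <= value <= MAX_INT64
    return value & _ONES_96

def unwrap_96bit_to_64bit(value):
    # The replicated sign occupies the top bits; bit 95 tells us whether the
    # 96-bit pattern encodes a negative number.
    return value - (1 << 96) if (value >> 95) & 1 else value

With this representation, homomorphically adding ciphertexts adds the 96-bit patterns, and the extra sign/carry bits above bit 63 are what allow overflow to be detected after decryption, as the query describes.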
Paillier encryption of multiple 64-bit integers into a single payload.
def EncryptMultipleInt64s(self, numberlist, r_value=None):
    plaintext = 0
    number_counter = 0
    if len(numberlist) > PACKING_LIMIT:
        raise ValueError('The number of entries in the input list cannot be'
                         + ' more than %d' % (PACKING_LIMIT))
    for entry in numberlist:
        if not isinstance(entry, int) and not isinstance(entry, long):
            raise ValueError('Expected int or long but got: %s' % type(entry))
        if entry < MIN_INT64 or entry > MAX_INT64:
            raise ValueError('Int64 values need to be between %d and %d but got %d'
                             % (MIN_INT64, MAX_INT64, entry))
        entry = self._Extend64bitTo96bitTwosComplement(entry)
        if number_counter > 0:
            plaintext <<= PACKING_BIT_SIZE
        plaintext += entry
        number_counter += 1
    return self.Encrypt(plaintext, r_value=r_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def pack_uint64s(data: List[int]) -> bytes:\n result = b\"\"\n for i in data:\n result += pack_uint64(i)\n return result", "def rop64(*args):\n\tpacked = \"\"\n\tfor x in args:\n\t\tif type(x) == int or type(x) == long:\n\t\t\tpacked += pack64(x)\n\t\telse:\n\t\t\tpacked += x\n\treturn packed", "def encrypt(x, P, S):\n xL, xR = split64(x)\n xL, xR = feistel(xL, xR, P, S)\n return merge64(xL, xR)", "def encode_payload(self, packets, b64=False):\n bytes = b''\n for packet in packets:\n packet_bytes = self.encode_packet(packet, b64)\n packet_len = len(packet_bytes)\n if b64:\n bytes += str(packet_len) + b':' + packet_bytes\n else:\n binary_len = b''\n while packet_len != 0:\n binary_len = six.int2byte(packet_len % 10) + binary_len\n packet_len = int(packet_len / 10)\n bytes += b'\\x01' if packet.binary else b'\\x00'\n bytes += binary_len + b'\\xff' + packet_bytes\n\n return bytes", "def Encode(chunk: bytes) -> bytes:\n return _UINT64.pack(len(chunk)) + chunk", "def encode_i64(value: int) -> bytes:\n return int_to_le_bytes(value, NUMERIC_CONSTRAINTS[CLTypeKey.I64].LENGTH, True)", "def p64(value: int, endian: str = \"little\") -> bytes:\n return pack(value, 64, endian)", "def EncryptInt64(self, plaintext, r_value=None):\n if not isinstance(plaintext, int) and not isinstance(plaintext, long):\n raise ValueError('Expected int or long plaintext but got: %s' %\n type(plaintext))\n if plaintext < MIN_INT64 or plaintext > MAX_INT64:\n raise ValueError('Int64 values need to be between %d and %d but got %d'\n % (MIN_INT64, MAX_INT64, plaintext))\n plaintext = self._Extend64bitTo96bitTwosComplement(plaintext)\n return self.Encrypt(plaintext, r_value=r_value)", "def p64(d):\n return pack('<Q', d)", "def pack_uint64(data: int) -> bytes:\n return struct.pack(\">Q\", data)", "def pack_uint64(data: int) -> bytes:\n return struct.pack(\">Q\", data)", "def two_x64_concat(data):\n storage_key = bytearray(xxhash.xxh64(data, seed=0).digest())\n storage_key.reverse()\n\n return storage_key + data", "def encode_payload(payload, key):\n encoded_payload = b''\n for b in payload:\n encoded_payload += bytes([b ^ key])\n\n return encoded_payload", "def ecb_encrypt(pt_bin_list, keys, rounds):\n enc_result = \"\"\n\n with multiprocessing.Pool() as p:\n enc_result = p.starmap(feistel_encrypt, zip(pt_bin_list, keys, repeat(rounds)))\n return enc_result", "def encode(input_: list):\n global n_bytes\n block = bytearray()\n\n for tup in input_:\n arr = np.array(tup[0], dtype=tup[1]).tobytes()\n n_bytes += len(arr)\n block += arr\n\n return block", "def DecryptMultipleInt64s(self, ciphertext):\n if not isinstance(ciphertext, int) and not isinstance(ciphertext, long):\n raise ValueError('Expected int or long type ciphertext but got: %s' %\n type(ciphertext))\n plaintext = self.Decrypt(ciphertext)\n decrypted_numbers = []\n for unused_i in range(PACKING_LIMIT):\n entry = plaintext & _ONES_96\n plaintext >>= PACKING_BIT_SIZE\n decrypted_numbers.insert(0, self._Unwrap96bitTo64bit(entry))\n return decrypted_numbers", "def encode_u64(value: int) -> bytes:\n return int_to_le_bytes(value, NUMERIC_CONSTRAINTS[CLTypeKey.U64].LENGTH, False)", "def chopstring(message, key, n, int_op):\n\n\n nbytes = block_size(n)\n\n msglen = len(message)\n blocks = msglen // nbytes\n\n if msglen % nbytes > 0:\n blocks += 1\n\n cypher = []\n \n for bindex in range(blocks):\n offset = bindex * nbytes\n block = message[offset:offset + nbytes]\n\n value = transform.bytes2int(block)\n to_store = int_op(value, key, n)\n\n cypher.append(to_store)\n\n return 
encode64chops(cypher) #Encode encrypted ints to base64 strings", "def generate_packed_encoder(wrapped_encoder):\n def length_wrapper(values):\n \"\"\"Encode repeat values and prefix with the length\"\"\"\n output = bytearray()\n for value in values:\n output += wrapped_encoder(value)\n length = varint.encode_varint(len(output))\n return length + output\n return length_wrapper", "def rop32(*args):\n\tpacked = \"\"\n\tfor x in args:\n\t\tif type(x) == int or type(x) == long:\n\t\t\tpacked += pack32(x)\n\t\telse:\n\t\t\tpacked += x\n\treturn packed", "def padlen_64(x: int):\n return (64 - (x % 64)) % 64", "def encrypt(self,num):\n assert len(bin(num)) <= len(bin(self.kl.n))-self.s\n return itob64(pow(random.randint(0,(1<<self.s)-1)+(num<<self.s),self.kl.e,self.kl.n))", "def pack_ssh_uint64(i):\n if not isinstance(i, int):\n raise TypeError(\"Must be an int\")\n elif i.bit_length() > 64:\n raise ValueError(\"Must be a 64bit value.\")\n\n return struct.pack('>Q', i)", "def join_bits(byteseq) -> int:\n return reduce(lambda acc, bit: (acc << 1) | int(bit), byteseq)", "def pack_varint_list(data: List[int]) -> bytes:\n result = b\"\"\n for value in data:\n result += pack_varint(value)\n return result", "def _pack_bytes(byte_list):\n return int.from_bytes(byte_list, 'big', signed=False)", "def ctr_encrypt(pt_bin_list, keys, rounds):\n msg = pt_bin_list\n nonce = generate_random_binary(len(pt_bin_list[0])-8) # Initialization Vector\n counter = range(0,len(msg))\n enc_result = \"\"\n\n with multiprocessing.Pool() as p:\n enc_result = p.starmap(ctr_process, zip(msg, repeat(nonce), counter, keys, repeat(rounds)))\n\n enc_result.insert(0,nonce+\"00000000\") # Store padded IV to the start of ciphertext\n return enc_result", "def x64exe_example():\n \n text_store = 
\"0100110101011010100100000000000000000011000000000000000000000000000001000000000000000000000000001111111111111111000000000000000010111000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001010100000000000000000000000000000001110000111111011101000001110000000001011010000001001110011010010000110111000000000010100110011001101001000010101010001101000011010010111001100100000011100000111001001101111011001110111001001100001011011010010000001100011011000010110111001101110011011110111010000100000011000100110010100100000011100100111010101101110001000000110100101101110001000000100010001001111010100110010000001101101011011110110010001100101001011100000110100001101000010100010010000000000000000000000000000000000000000000000000000000000100011110110111000111010110110011100101100001111010101001000101011001011000011110101010010001010110010110000111101010100100010101100101100001111010101001000101011001010000011110101010010001010000111100110001001010110100010111100101000001111010101001000101001010010011010010110001101101000110010110000111101010100100010100101000001000101000000000000000001100100100001100000001000000000000001111101111010001111010111010000000000000000000000000000000000000000000000000000000000000000111100000000000000100010000000000000101100000010000011100001100000000000000000100000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000001000000000000000000000000000000000000000000000100000000000001000000000000000000000000000000000001000000000000000000000000000000000010000000000000000000000110000000000000000000000000000000000000000000000000000000000000011000000000000000000000000000000000000000000000000000000000000000000011000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000110000000001100000100000010000000000000000000100000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000000111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010111001110100011001010111100001110100000000000000000000000000000001010000000000000000000000000
000000000010000000000000000000000000000000000100000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000010000000000000000000000110000000101110011100100110010001100001011101000110000100000000000000000101100000000000000000000000000000000000001000000000000000000000000000000000001000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001000000000000000000000001000000101110000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000001111101111010001111010111010000000000000000000000000000000000001101000000000000000000000000001111000000000000000000000000000001110000100000000000000000000000011100000001000000000000000000000000000000000000000000000000000000000000010000000000000000000000000101000000000000000000000000001011100111010001100101011110000111010000000000000000000000000000000000001000000000000000000000000111000000000000000000000000000010111001110010011001000110000101110100011000010000000000000000000111000010000000000000000000000011110000000000000000000000000000101110011100100110010001100001011101000110000100100100011110100111101001111010011001000110001001100111000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000
00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000\"\n text.delete('1.0', tk.END) \n text.insert(tk.END, text_store) \n box=tk.Tk()\n m = tk.Message(box, text=\"In theory you could save this code as an .exe file and run it on a 64bit Intel chip machine, however we would strongly advise you not to :\\n1) it won't appear to do anything (see the code below) - there is no output; \\n2) there are no guarentees as to what will happen if you edit this code - you could damage your machine;\\n3) there are no guarentees that even as the code stands it will not damage your machine - it may run differently on different machines;\\n4) you should not run arbitrary binary code downloaded from someone you do not have reason to trust - while the code for this editor is open source, and therefore checkable, checking binary code is much harder. This software is licenced under a restricted licence that excludes adjustment of the source code (see ReadMe) but that doesn't mean someone hasn't if you didn't track the code back to the original site. 
Ultimately, there's really no way of trusting the code without trusting the site you downloaded it from.\\n\\nNevertheless, for information, this code was compiled from the following assembly:\\n\\n-----------------------------------\\nbits 64\\n\\ndefault rel\\n\\nsegment .text\\nglobal main\\n\\n mov rax, 0\\n-----------------------------------\\n\\nThe following command lines were used to compile it:\\n\\nnasm -f win64 -o file.obj file.asm\\n\\nlink file.obj /subsystem:console /entry:main /out:file.exe\\n\\nThe file ran from a command prompt.\")\n m.config(padx=50, pady=50, width=350)\n m.pack()", "def add_int64(self, value):\n self._check_int_type(value, _INT_8BYTE_UPPERLIMIT)\n self._data += value.to_bytes(8, byteorder=\"little\")", "def xxh128(data):\n storage_key1 = bytearray(xxhash.xxh64(data, seed=0).digest())\n storage_key1.reverse()\n\n storage_key2 = bytearray(xxhash.xxh64(data, seed=1).digest())\n storage_key2.reverse()\n\n return storage_key1 + storage_key2" ]
[ "0.6469461", "0.62068796", "0.61395895", "0.5897477", "0.5856876", "0.5820442", "0.57950085", "0.5781007", "0.57714444", "0.57406914", "0.57406914", "0.57341266", "0.5697893", "0.5689338", "0.5596759", "0.5524", "0.5515085", "0.5504577", "0.5504514", "0.5503515", "0.54212505", "0.5409587", "0.53895026", "0.5389288", "0.536955", "0.534308", "0.53340507", "0.53031814", "0.52908146", "0.5282005" ]
0.7172408
0
Paillier decryption of ciphertext into multiple int64 values.
def DecryptMultipleInt64s(self, ciphertext):
    if not isinstance(ciphertext, int) and not isinstance(ciphertext, long):
        raise ValueError('Expected int or long type ciphertext but got: %s' %
                         type(ciphertext))
    plaintext = self.Decrypt(ciphertext)
    decrypted_numbers = []
    for unused_i in range(PACKING_LIMIT):
        entry = plaintext & _ONES_96
        plaintext >>= PACKING_BIT_SIZE
        decrypted_numbers.insert(0, self._Unwrap96bitTo64bit(entry))
    return decrypted_numbers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def DecryptInt64(self, ciphertext):\n if not isinstance(ciphertext, int) and not isinstance(ciphertext, long):\n raise ValueError('Expected int or long type ciphertext but got: %s' %\n type(ciphertext))\n plaintext = self.Decrypt(ciphertext)\n return self._Unwrap96bitTo64bit(plaintext)", "def Decrypt(self, ciphertext):\n if not isinstance(ciphertext, int) and not isinstance(ciphertext, long):\n raise ValueError('Expected int or long type ciphertext but got: %s' %\n type(ciphertext))\n u = ModExp(ciphertext, self.__lambda, self.nsquare)\n l_of_u = (u - 1) // self.n\n return (l_of_u * self.__mu) % self.n", "def decrypt(ciphertexts):\n e_x = []\n\n for i in range(3):\n c1, c2 = ciphertexts[i][x[i]]\n dec = elgamal.decrypt(c1, c2, secret_keys[i])\n e_x.append(str(bin(dec))[2:].zfill(16))\n\n return e_x", "def raw_decrypt(self, ciphertext):\n if not isinstance(ciphertext, int) and not isinstance(ciphertext, type(mpz(1))) and not isinstance(scalar, numpy.int64):\n raise TypeError('Expected ciphertext to be an int, not: %s' %\n type(ciphertext))\n\n decrypt_to_p = self.l_function(powmod(ciphertext, self.p-1, self.psquare), self.p) * self.hp % self.p\n decrypt_to_q = self.l_function(powmod(ciphertext, self.q-1, self.qsquare), self.q) * self.hq % self.q\n value = self.crt(decrypt_to_p, decrypt_to_q)\n if value < self.n/3:\n return value\n else:\n return value - self.n", "def decrypt(self,encr):\n return pow(b64toi(encr),self.kl.d,self.kl.n)>>self.s", "def decrypt_railfence(ciphertext, num_rails):\n if num_rails == 1:\n return ciphertext\n\n lists = slice_ciphertext(ciphertext, num_rails) # could use queue to simply the implementation once we got to OOP\n\n #print(lists)\n rows_indices = [0] * num_rails \n\n decrypted = ''\n row = -1\n dir = 1\n cipher_len = len(ciphertext)\n for i in range(cipher_len):\n row += dir\n decrypted += lists[row][rows_indices[row]]\n rows_indices[row] += 1\n if row == 0:\n dir = 1\n elif row == num_rails - 1:\n dir = -1\n return decrypted", "def decrypt(x, P, S):\n xL, xR = split64(x)\n xL, xR = feistel(xL, xR, tuple(reversed(P)), S)\n return merge64(xL, xR)", "def decrypt():\n plaintext = \"\"\n i = 0\n while i < len(ciphertext):\n if i%2==1:\n try:\n plaintext += key[ ciphertext[i-1]+ciphertext[i] ]\n except KeyError:\n plaintext += ciphertext[i-1]+ciphertext[i]\n i += 1\n return plaintext", "def decrypt(ciphertext):\n base_decode = {'16': base64.b16decode,\n '32': base64.b32decode, '64': base64.b64decode}\n cleartext = ciphertext+''\n for i in range(encrypt_times):\n cleartext = base_decode[get_base(cleartext)](cleartext)\n return cleartext", "def ctr_decrypt(ct_bin_list, keys, rounds):\n msg = ct_bin_list\n nonce = msg.pop(0)[:-8]\n counter = range(0,len(msg))\n dec_result = \"\"\n\n with multiprocessing.Pool() as p:\n dec_result = p.starmap(ctr_process, zip(msg, repeat(nonce), counter, keys, repeat(rounds)))\n\n return dec_result", "def decrypt(ciphertext, pad):\n\n return OR(ciphertext, pad)", "def _decrypt(self, data, key):\n seed1 = key\n seed2 = 0xEEEEEEEE\n result = BytesIO()\n\n for i in range(len(data) // 4):\n seed2 += self.encryption_table[0x400 + (seed1 & 0xFF)]\n seed2 &= 0xFFFFFFFF\n value = struct.unpack(\"<I\", data[i*4:i*4+4])[0]\n value = (value ^ (seed1 + seed2)) & 0xFFFFFFFF\n\n seed1 = ((~seed1 << 0x15) + 0x11111111) | (seed1 >> 0x0B)\n seed1 &= 0xFFFFFFFF\n seed2 = value + seed2 + (seed2 << 5) + 3 & 0xFFFFFFFF\n\n result.write(struct.pack(\"<I\", value))\n\n return result.getvalue()", "def decrypt(ciphertext: str) -> Iterable:\n return 
simplesubstitution.decrypt(KEY, ciphertext)", "def ecb_decrypt(ct_bin_list, keys, rounds):\n dec_result = \"\"\n\n with multiprocessing.Pool() as p:\n dec_result = p.starmap(feistel_decrypt, zip(ct_bin_list, keys, repeat(rounds)))\n return dec_result", "def decrypt_key(data, key):\n data = MegaCrypto.base64_decode(data)\n return sum((MegaCrypto.str_to_a32(MegaCrypto.cbc_decrypt(data[_i:_i + 16], key))\n for _i in range(0, len(data), 16)), ())", "def decrypt(self, data):", "def decrypt(key, ciphertext):\n data = fk(keyGen(key)[1], ip(ciphertext))\n return fp(fk(keyGen(key)[0], swapNibbles(data)))", "def cbc_decrypt(ct_bin_list, keys, rounds):\n ivector = ct_bin_list.pop(0)\n dec_result = []\n msg = ct_bin_list\n\n with multiprocessing.Pool() as p:\n x = p.starmap(feistel_decrypt, zip(msg, keys, repeat(rounds)))\n\n dec_result.append(xor_compare(x[0],ivector))\n if len(x) > 1:\n for i in range(1, len(x)):\n dec_result.append(xor_compare(x[i],msg[i-1]))\n\n return dec_result", "def decrypt_block(self, ciphertext):\n assert len(ciphertext) == 16\n\n cipher_state = bytes2matrix(ciphertext)\n\n add_round_key(cipher_state, self._key_matrices[-1])\n inv_shift_rows(cipher_state)\n inv_sub_bytes(cipher_state)\n\n for i in range(self.n_rounds - 1, 0, -1):\n add_round_key(cipher_state, self._key_matrices[i])\n inv_mix_columns(cipher_state)\n inv_shift_rows(cipher_state)\n inv_sub_bytes(cipher_state)\n \n add_round_key(cipher_state, self._key_matrices[0])\n\n return matrix2bytes(cipher_state)", "def decrypt(ciphertext):\n # AES decrypt\n iv = ciphertext[:16]\n ciphertext = ciphertext[16:]\n aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return unpad(aes.decrypt(ciphertext))", "def decrypt_ctr(key, ciphertext):\n\tmessage = ''\n\tiv = ciphertext[0:16]\n\tfor i in range(16, len(ciphertext), 16):\n\t\tinputblock = ciphertext[i:i+16]\n\t\tcipher = AES.new(key, AES.MODE_ECB)\n\t\txorkey = cipher.encrypt(long_to_bytes(bytes_to_long(iv)+(i/16-1)))\n\t\tif len(inputblock) == 16:\n\t\t\tmessage += strxor(inputblock, xorkey)\n\t\telse:\n\t\t\tmessage += strxor(inputblock, xorkey[:len(inputblock)])\n\treturn message", "def decrypt_chunk(self, chunk):\n L, R = chunk\n # Run the feistel rounds as in encryption, but with keys going from n..1\n for i in range(self.number_of_rounds, 0, -1):\n L, R = self.feistel_round(L, R, i)\n return R, L", "def test_decryption(d, c):\n\n#\td = int(raw_input(\"\\nEnter d from public key\\n\"))\n#\tc = int(raw_input(\"\\nEnter c from public key\\n\"))\n\n x = int(raw_input(\"\\nEnter number to decrypt\\n\"))\n decode(endecrypt(x, d, c))", "def decryptAESCTR(key, nonce, ct):\n\tpt = b''\n\tcounter = 0\n\tfor ctBlock in chunks(ct, 16):\n\t\tblock = (int.from_bytes(nonce, byteorder='big') + counter).to_bytes(16, byteorder='big')\n\t\tencBlock = encryptAESBlock(key, block)\n\t\tpt += xor(ctBlock, encBlock)\t\t\n\t\tcounter += 1\n\treturn pt", "def decrypt(n, ciphtext):\r\n res = ''\r\n\r\n for l in ciphtext:\r\n try:\r\n i = (key.index(l) + n) % len(key)\r\n res += key[i]\r\n except ValueError:\r\n res += 1\r\n return res", "def decrypt(self, ciphertext, key):\n iv = ciphertext[:AES.block_size]\n cipher = AES.new(key, AES.MODE_CBC, iv, segment_size=64)\n plaintext = cipher.decrypt(ciphertext[AES.block_size:])\n return self.pkcs7_unpad(plaintext)", "def _decrypt_bit(self, c1, c2, r, a):\n\n r2 = (r*r) % self.n\n x = c1 + 2*r if r2 == a else c2 + 2*r\n return gmpy2.jacobi(x, self.n)", "def decrypt(priv, pub, cipher):\n x = myExp(cipher, priv.lamb, pub.n_sq) - 1\n plain = ((x // pub.n) * 
priv.mu) % pub.n\n return plain", "def decrypt(key, cipher, use_custom=False):\n result = logic(key, cipher, use_custom)\n return array.array(\"B\", result)", "def decrypt(self, buffer):\n try:\n ct = base64.b64decode(buffer)\n except:\n print('f a i l')\n return bytes('fail')\n\n cipher = AES.new(self.psk, AES.MODE_GCM, FIXED_IV)\n pt = unpad(cipher.decrypt(ct), AES.block_size)\n return pt" ]
[ "0.6999377", "0.68856674", "0.67466295", "0.67262083", "0.6428826", "0.6360104", "0.6349768", "0.6308733", "0.6303354", "0.6211624", "0.61085457", "0.60960937", "0.60612965", "0.60026455", "0.5953625", "0.5937819", "0.59188956", "0.5888119", "0.58588505", "0.5841659", "0.58265865", "0.5797765", "0.5783952", "0.5770438", "0.57328916", "0.5714137", "0.5707753", "0.56689906", "0.56656396", "0.5665385" ]
0.7780901
0
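The packing scheme used by EncryptMultipleInt64s and DecryptMultipleInt64s in the two records above can be exercised on plaintexts alone. This is a rough sketch assuming PACKING_BIT_SIZE is 96 and treating PACKING_LIMIT as a placeholder (the real limit depends on the key/modulus size); it is not the library's own code.

PACKING_BIT_SIZE = 96
PACKING_LIMIT = 8  # placeholder; the real limit depends on the modulus size
_ONES_96 = (1 << 96) - 1

def pack_int64s(values):
    packed = 0
    for v in values:
        # Each value occupies one 96-bit slot, most significant slot first.
        packed = (packed << PACKING_BIT_SIZE) | (v & _ONES_96)
    return packed

def unpack_int64s(packed, count):
    out = []
    for _ in range(count):
        slot = packed & _ONES_96
        packed >>= PACKING_BIT_SIZE
        # Undo the 96-bit two's complement wrapping per slot.
        out.insert(0, slot - (1 << 96) if (slot >> 95) & 1 else slot)
    return out

assert unpack_int64s(pack_int64s([3, -2, 41]), 3) == [3, -2, 41]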
Paillier decryption of ciphertext into an int64 value.
def DecryptInt64(self, ciphertext):
    if not isinstance(ciphertext, int) and not isinstance(ciphertext, long):
        raise ValueError('Expected int or long type ciphertext but got: %s' %
                         type(ciphertext))
    plaintext = self.Decrypt(ciphertext)
    return self._Unwrap96bitTo64bit(plaintext)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Decrypt(self, ciphertext):\n if not isinstance(ciphertext, int) and not isinstance(ciphertext, long):\n raise ValueError('Expected int or long type ciphertext but got: %s' %\n type(ciphertext))\n u = ModExp(ciphertext, self.__lambda, self.nsquare)\n l_of_u = (u - 1) // self.n\n return (l_of_u * self.__mu) % self.n", "def raw_decrypt(self, ciphertext):\n if not isinstance(ciphertext, int) and not isinstance(ciphertext, type(mpz(1))) and not isinstance(scalar, numpy.int64):\n raise TypeError('Expected ciphertext to be an int, not: %s' %\n type(ciphertext))\n\n decrypt_to_p = self.l_function(powmod(ciphertext, self.p-1, self.psquare), self.p) * self.hp % self.p\n decrypt_to_q = self.l_function(powmod(ciphertext, self.q-1, self.qsquare), self.q) * self.hq % self.q\n value = self.crt(decrypt_to_p, decrypt_to_q)\n if value < self.n/3:\n return value\n else:\n return value - self.n", "def decrypt(self,encr):\n return pow(b64toi(encr),self.kl.d,self.kl.n)>>self.s", "def decrypt(ciphertext):\n # AES decrypt\n iv = ciphertext[:16]\n ciphertext = ciphertext[16:]\n aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return unpad(aes.decrypt(ciphertext))", "def decrypt(ciphertext):\n base_decode = {'16': base64.b16decode,\n '32': base64.b32decode, '64': base64.b64decode}\n cleartext = ciphertext+''\n for i in range(encrypt_times):\n cleartext = base_decode[get_base(cleartext)](cleartext)\n return cleartext", "def decrypt(key, ciphertext):\n data = fk(keyGen(key)[1], ip(ciphertext))\n return fp(fk(keyGen(key)[0], swapNibbles(data)))", "def extract_ciphertext(self):\n return b64decode(self.cipherblock[\"ciphertext\"])", "def DecryptMultipleInt64s(self, ciphertext):\n if not isinstance(ciphertext, int) and not isinstance(ciphertext, long):\n raise ValueError('Expected int or long type ciphertext but got: %s' %\n type(ciphertext))\n plaintext = self.Decrypt(ciphertext)\n decrypted_numbers = []\n for unused_i in range(PACKING_LIMIT):\n entry = plaintext & _ONES_96\n plaintext >>= PACKING_BIT_SIZE\n decrypted_numbers.insert(0, self._Unwrap96bitTo64bit(entry))\n return decrypted_numbers", "def AES_decrypt(ciphertext: bytes) -> Text:\n text = b64decode(ciphertext)\n cipher = AES.new(secret_key, mode, IV)\n return Padding.unpad(cipher.decrypt(text), bs).decode('utf-8')", "def decrypt(ciphertext, pad):\n\n return OR(ciphertext, pad)", "def decrypt(self, ciphertext, key):\n iv = ciphertext[:AES.block_size]\n cipher = AES.new(key, AES.MODE_CBC, iv, segment_size=64)\n plaintext = cipher.decrypt(ciphertext[AES.block_size:])\n return self.pkcs7_unpad(plaintext)", "def decrypt(x, P, S):\n xL, xR = split64(x)\n xL, xR = feistel(xL, xR, tuple(reversed(P)), S)\n return merge64(xL, xR)", "def decrypt(self, ciphertext):\n return self._transform(ciphertext, self._backward)", "def decrypt_pk(priv_key, ciphertext):\n try:\n plaintext = priv_key.decrypt(\n b64decode(ciphertext),\n padding.OAEP(\n mgf=padding.MGF1(algorithm=CryptoHash()),\n algorithm=CryptoHash(),\n label=None\n )\n )\n except UnsupportedAlgorithm as e:\n # a failure to dencrypt someone else's data is not typically a fatal\n # error, but in this particular case, the most likely cause of this\n # error is an old cryptography library\n logging.error(\"Fatal error: encryption hash {} unsupported, try upgrading to cryptography >= 1.4. 
Exception: {}\".format(\n CryptoHash, e))\n # re-raise the exception for the caller to handle\n raise e\n return plaintext", "def decrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n decrypted = aes.decrypt(text)\r\n return decrypted", "def decrypt_key(data, key):\n data = MegaCrypto.base64_decode(data)\n return sum((MegaCrypto.str_to_a32(MegaCrypto.cbc_decrypt(data[_i:_i + 16], key))\n for _i in range(0, len(data), 16)), ())", "def decrypt_railfence(ciphertext, num_rails):\n if num_rails == 1:\n return ciphertext\n\n lists = slice_ciphertext(ciphertext, num_rails) # could use queue to simply the implementation once we got to OOP\n\n #print(lists)\n rows_indices = [0] * num_rails \n\n decrypted = ''\n row = -1\n dir = 1\n cipher_len = len(ciphertext)\n for i in range(cipher_len):\n row += dir\n decrypted += lists[row][rows_indices[row]]\n rows_indices[row] += 1\n if row == 0:\n dir = 1\n elif row == num_rails - 1:\n dir = -1\n return decrypted", "def decrypt(self, ciphertext):\n\n # Note that the state of the cipher is updated by each operation,\n # and the offset into the stream is implicit, which means that\n # it is almost always an error to use the encrypt and decrypt\n # methods of the same instance, so we do a simple check to ensure\n # that this isn't the case.\n #\n if self.prev_crypto_op and self.prev_crypto_op != self.decrypt:\n raise RuntimeError('Same instance used for encrypt/decrypt')\n self.prev_crypto_op = self.decrypt\n\n return self.rc4.update(ciphertext)", "def test_decryption(d, c):\n\n#\td = int(raw_input(\"\\nEnter d from public key\\n\"))\n#\tc = int(raw_input(\"\\nEnter c from public key\\n\"))\n\n x = int(raw_input(\"\\nEnter number to decrypt\\n\"))\n decode(endecrypt(x, d, c))", "def decrypt(priv, pub, cipher):\n x = myExp(cipher, priv.lamb, pub.n_sq) - 1\n plain = ((x // pub.n) * priv.mu) % pub.n\n return plain", "def decrypt(self, enc):\n\n enc = base64.b64decode(enc)\n iv = enc[:AES.block_size]\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n return self._unpad(cipher.decrypt(enc[AES.block_size:])).decode('utf-8')", "def decrypt(self, data):", "def decrypt(ciphertext, key):\n\ttry:\n\t\tfrom Cryptodome.Cipher import AES\n\texcept ImportError:\n\t\tfrom Crypto.Cipher import AES\n\n\tif not isPython2():\n\t\tif isString(ciphertext):\n\t\t\tciphertext = ciphertext.encode(\"latin-1\")\n\t\tif isString(key):\n\t\t\tkey = key.encode(\"latin-1\")\n\t\t\n\tiv = ciphertext[:AES.block_size]\n\tcipher = AES.new(key, AES.MODE_CBC, iv)\n\tplaintext = cipher.decrypt(ciphertext[AES.block_size:])\n\treturn plaintext", "def decrypt_epic(aes_key, encrypted_data):\n # Decode encrypted string\n decoded = base64.b64decode(encrypted_data)\n\n # Decrypt decoded string\n decoded_readable = CryptDecrypt(aes_key, decoded).decode('utf-8')\n return decoded_readable", "def decrypt_epic(aes_key, encrypted_data):\n # Decode encrypted string\n decoded = base64.b64decode(encrypted_data)\n\n # Decrypt decoded string\n decoded_readable = CryptDecrypt(aes_key, decoded).decode('utf-8')\n return decoded_readable", "def decrypt(self, buffer):\n try:\n ct = base64.b64decode(buffer)\n except:\n print('f a i l')\n return bytes('fail')\n\n cipher = AES.new(self.psk, AES.MODE_GCM, FIXED_IV)\n pt = unpad(cipher.decrypt(ct), AES.block_size)\n return pt", "def heat_decrypt(value, encryption_key=None):\n encryption_key = get_valid_encryption_key(encryption_key)\n auth = base64.b64decode(value)\n iv = auth[:AES.block_size]\n cipher = AES.new(encryption_key, AES.MODE_CFB, iv)\n res = 
cipher.decrypt(auth[AES.block_size:])\n return res", "def decrypt(self):\n # Grab the initialization vector from the front of the cipher-text\n iv = self.ciphertext[:AES.block_size]\n # Create a new AES object in Cipher Block Chaining mode\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n return cipher.decrypt(self.ciphertext)[AES.block_size:].rstrip().decode(\"utf-8\"), iv", "def decrypt(self, ciphertext: bytes,\n padding: AsymmetricPadding) -> bytes:\n pass", "def decrypt():\n plaintext = \"\"\n i = 0\n while i < len(ciphertext):\n if i%2==1:\n try:\n plaintext += key[ ciphertext[i-1]+ciphertext[i] ]\n except KeyError:\n plaintext += ciphertext[i-1]+ciphertext[i]\n i += 1\n return plaintext" ]
[ "0.76950616", "0.751412", "0.7208298", "0.6586208", "0.6520382", "0.6476167", "0.6406717", "0.6404868", "0.63836175", "0.6361718", "0.6336824", "0.6297889", "0.6279345", "0.6242041", "0.62175155", "0.6204211", "0.6166816", "0.61604214", "0.61572933", "0.61452925", "0.6128287", "0.6127148", "0.6105363", "0.6101631", "0.6101631", "0.6100891", "0.609386", "0.6078209", "0.6064298", "0.60606515" ]
0.7853171
0
Encrypt float (IEEE 754 binary64) values with limited exponents. Paillier homomorphic addition only directly adds positive binary values; however, we would like to add both positive and negative float values
def EncryptFloat(self, plaintext, r_value=None):
    if not isinstance(plaintext, float):
        raise ValueError('Expected float plaintext but got: %s' % type(plaintext))
    input_as_long = struct.unpack('Q', struct.pack('d', plaintext))[0]
    mantissa = (input_as_long & 0xfffffffffffff) | 0x10000000000000
    exponent = ((input_as_long >> 52) & 0x7ff) - EXPONENT_BIAS
    sign = input_as_long >> (EXPLICIT_MANTISSA_BITS + EXPONENT_BITS)
    if IsNan(plaintext):
        # Put a 1 in the 32 bit nan indicator field.
        plaintext = 0x00000001 << FLOAT_NAN_LSB  # << 991
    elif IsInfPlus(plaintext):
        # Put a 1 in the 32 bit plus inf indicator field.
        plaintext = 0x00000001 << FLOAT_PLUSINF_LSB  # << 959
    elif IsInfMinus(plaintext):
        # Put a 1 in the 32 bit minus inf indicator field.
        plaintext = 0x00000001 << FLOAT_MINUSINF_LSB  # << 927
    elif exponent == 0 and mantissa == 0:  # explicit 0
        plaintext = 0
    elif exponent > FLOAT_MANTISSA_ZERO:  # > 389
        # Can't represent such large numbers
        raise ValueError('Floats with exponents larger than 389 are currently '
                         'not supported.')
    elif exponent < -FLOAT_MANTISSA_ZERO - EXPLICIT_MANTISSA_BITS:  # < -389 - 52
        # too small, set to zero
        plaintext = 0
    else:  # representable numbers with -441 <= exponent <= 389.
        # Place the 53 bit mantissa (1 + 52 explicit mantissa bits) in the
        # 831 bit payload and shift according to the exponent.
        # - first put the 53 bit mantissa on the left most side of the payload
        plaintext = mantissa << FLOAT_MANTISSA_LSB  # << 778
        # - second shift right as needed.
        plaintext >>= (FLOAT_MANTISSA_ZERO - exponent)  # >>= (389 - exponent)
        # Find 2s complement if the number is negative
        if sign == 1:  # neg number
            # make 895 bit (831 + 64 extended sign bits) 2s complement
            plaintext = (plaintext ^ _ONES_CARRYOVER_LSB) + 1L
    return self.Encrypt(plaintext, r_value=r_value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _enc(x: int) -> float:\n return 2 + x + (29 / (x ** 2 + (1 - x) ** 2))", "def _raw_add(self, e_a, e_b):\n return e_a * e_b % self.public_key.nsquare", "async def test_floating_point_encoding(self, r):\n await r.flushdb()\n timestamp = 1349673917.939762\n await r.zadd('a', timestamp, 'a1')\n assert await r.zscore('a', 'a1') == timestamp", "def DecryptFloat(self, ciphertext):\n original_plaintext = self.Decrypt(ciphertext)\n plaintext = original_plaintext\n mantissa_and_exponent = plaintext & _ONES_FLOAT_SIGN_LOW_LSB\n plaintext >>= FLOAT_SIGN_LOW_LSB # >>= 831\n sign_low32 = plaintext & 0xffffffff\n plaintext >>= 32\n sign_high32 = plaintext & 0xffffffff\n plaintext >>= 32\n # carry_over32 = plaintext & 0xffffffff\n plaintext >>= 32\n minus_inf32 = plaintext & 0xffffffff\n plaintext >>= 32\n plus_inf32 = plaintext & 0xffffffff\n plaintext >>= 32\n nan_32 = plaintext & 0xffffffff\n if nan_32 > 0:\n return float('nan')\n # adding a +inf and -inf should return a nan\n if plus_inf32 > 0 and minus_inf32 > 0:\n return float('nan')\n if plus_inf32 > 0:\n return float('inf')\n if minus_inf32 > 0:\n return float('-inf')\n if sign_high32 == 0 and sign_low32 > 0:\n # This indicates that positive overflow has happened, mimic ieee float\n # behaviour and return +inf.\n return float('inf')\n if sign_high32 == 0xffffffff and sign_low32 < 0xffffffff:\n # This indicates that negative overflow has happened, mimic ieee float\n # behaviour and return -inf.\n return float('-inf')\n if sign_high32 == 0 and sign_low32 == 0:\n # positive finite number.\n if mantissa_and_exponent == 0L:\n return float(0)\n size = len(bin(mantissa_and_exponent)) - 2 # -2 to remove prepended 0b\n if size >= MANTISSA_BITS:\n # take the first 53 bits and remove the leading 1 bit i.e 52 bits.\n new_mantissa = ((mantissa_and_exponent >> (size - MANTISSA_BITS))\n & 0xfffffffffffff)\n else:\n # take all the bits and shift left to make it a normal number,\n # the exponent also gets updated appropriately.\n new_mantissa = ((mantissa_and_exponent << (MANTISSA_BITS - size))\n & 0xfffffffffffff)\n new_exponent = ((size - MANTISSA_BITS) - FLOAT_MANTISSA_ZERO +\n EXPONENT_BIAS)\n new_value = (new_exponent << EXPLICIT_MANTISSA_BITS) | new_mantissa\n return struct.unpack('d', struct.pack('Q', new_value))[0]\n if sign_high32 == 0xffffffff and sign_low32 == 0xffffffff:\n # negative finite number.\n # - first find the positive value of the number by taking the 2s\n # complement of the 895 bit integer.\n num = original_plaintext & _ONES_CARRYOVER_LSB\n positive_895bit_value = (num ^ _ONES_CARRYOVER_LSB) + 1L\n # - final value will mostly be a 831 bit number or smaller except if\n # 831 bits are all zero which represents -2^831 and gives a 2's complement\n # positive value of 2^831, we detect this case and return -inf.\n positive_832bit_value = positive_895bit_value & _ONES_832\n if positive_832bit_value >> FLOAT_SIGN_LOW_LSB: # >> 831:\n return float('-inf')\n size = len(bin(positive_832bit_value)) - 2\n if size >= MANTISSA_BITS:\n # take the first 53 bits and remove the leading 1 bit.\n new_mantissa = ((positive_832bit_value >> (size - MANTISSA_BITS))\n & 0xfffffffffffff)\n else:\n # take all the bits and shift left to make it a normal number,\n # the exponent also gets updated appropriately.\n new_mantissa = ((positive_832bit_value << (MANTISSA_BITS - size))\n & 0xfffffffffffff)\n new_exponent = ((size - MANTISSA_BITS) - FLOAT_MANTISSA_ZERO +\n EXPONENT_BIAS)\n new_value = ((new_exponent << EXPLICIT_MANTISSA_BITS) | new_mantissa |\n (1 << 
(EXPLICIT_MANTISSA_BITS + EXPONENT_BITS)))\n return struct.unpack('d', struct.pack('Q', new_value))[0]\n raise ValueError('Got an unusual decrypted value either nan, inf or sign '\n 'bits aren\\'t set correctly: %s' % hex(original_plaintext))", "def float_encode(self, value):\n if value < 128:\n code = value\n elif value > 31743:\n code = 255\n else:\n exp=0\n value>>=3\n while(value>31):\n exp+=1\n value>>=1\n exp<<=4\n code = 0x80 | exp | (value & 0x0F)\n return code", "def addFloat(self, f):\n self._buf.push(_F_STRUCT.pack(f))", "def write_float32(self, f: float) -> None:\n self.buffer += struct.pack(\"<f\", f)", "def encrypt(_g, _s, _e, _n, _m):\n\tr = gmpy2.xmpz(1)\n\tg = gmpy2.xmpz(_g)\n\ts = gmpy2.xmpz(_s)\n\te = gmpy2.xmpz(_e)\n\tn = gmpy2.xmpz(_n)\n\tm = gmpy2.xmpz(_m)\n\n\tb1 = f_mod(e, n)\n\tb1 = pow(g, pow(s, b1))\n\tb1 = mul(b1, f_mod(pow(r,m), pow(m,2)))\n\treturn b1", "def ff_add(a, b):\n return a ^ b", "def enc_add(pub, m1, m2):\n add_result = m1 * m2 % pub.n_sq\n return add_result", "def _(_: FloatType, value: float) -> bytes:\n return _FLOAT_STRUCT.pack(value)", "def exp(a: Decimal, b: Decimal) -> Decimal:\n return a ** b", "def _FloatingPointEncoder(wire_type, format):\n\n value_size = struct.calcsize(format)\n if value_size == 4:\n def EncodeNonFiniteOrRaise(write, value):\n # Remember that the serialized form uses little-endian byte order.\n if value == _POS_INF:\n write(b'\\x00\\x00\\x80\\x7F')\n elif value == _NEG_INF:\n write(b'\\x00\\x00\\x80\\xFF')\n elif value != value: # NaN\n write(b'\\x00\\x00\\xC0\\x7F')\n else:\n raise\n elif value_size == 8:\n def EncodeNonFiniteOrRaise(write, value):\n if value == _POS_INF:\n write(b'\\x00\\x00\\x00\\x00\\x00\\x00\\xF0\\x7F')\n elif value == _NEG_INF:\n write(b'\\x00\\x00\\x00\\x00\\x00\\x00\\xF0\\xFF')\n elif value != value: # NaN\n write(b'\\x00\\x00\\x00\\x00\\x00\\x00\\xF8\\x7F')\n else:\n raise\n else:\n raise ValueError('Can\\'t encode floating-point values that are '\n '%d bytes long (only 4 or 8)' % value_size)\n\n def SpecificEncoder(field_number, is_repeated, is_packed):\n local_struct_pack = struct.pack\n if is_packed:\n tag_bytes = TagBytes(field_number, wire_format.WIRETYPE_LENGTH_DELIMITED)\n local_EncodeVarint = _EncodeVarint\n def EncodePackedField(write, value, deterministic):\n write(tag_bytes)\n local_EncodeVarint(write, len(value) * value_size, deterministic)\n for element in value:\n # This try/except block is going to be faster than any code that\n # we could write to check whether element is finite.\n try:\n write(local_struct_pack(format, element))\n except SystemError:\n EncodeNonFiniteOrRaise(write, element)\n return EncodePackedField\n elif is_repeated:\n tag_bytes = TagBytes(field_number, wire_type)\n def EncodeRepeatedField(write, value, unused_deterministic=None):\n for element in value:\n write(tag_bytes)\n try:\n write(local_struct_pack(format, element))\n except SystemError:\n EncodeNonFiniteOrRaise(write, element)\n return EncodeRepeatedField\n else:\n tag_bytes = TagBytes(field_number, wire_type)\n def EncodeField(write, value, unused_deterministic=None):\n write(tag_bytes)\n try:\n write(local_struct_pack(format, value))\n except SystemError:\n EncodeNonFiniteOrRaise(write, value)\n return EncodeField\n\n return SpecificEncoder", "def encrypt(x, P, S):\n xL, xR = split64(x)\n xL, xR = feistel(xL, xR, P, S)\n return merge64(xL, xR)", "def compress(self,float32):\n\n F16_EXPONENT_BITS = 0x1F\n F16_EXPONENT_SHIFT = 10\n F16_EXPONENT_BIAS = 15\n F16_MANTISSA_BITS = 0x3ff\n F16_MANTISSA_SHIFT 
= (23 - F16_EXPONENT_SHIFT)\n F16_MAX_EXPONENT = (F16_EXPONENT_BITS << F16_EXPONENT_SHIFT)\n\n if type(float32) == float:\n f32 = self.unpack(float32)\n else:\n f32 = float32\n f16 = 0\n sign = (f32 >> 16) & 0x8000\n exponent = ((f32 >> 23) & 0xff) - 127\n mantissa = f32 & 0x007fffff\n \n if exponent == 128:\n f16 = sign | F16_MAX_EXPONENT\n if mantissa:\n f16 |= (mantissa & F16_MANTISSA_BITS)\n elif exponent > 15:\n f16 = sign | F16_MAX_EXPONENT\n elif exponent > -15:\n exponent += F16_EXPONENT_BIAS\n mantissa >>= F16_MANTISSA_SHIFT\n f16 = sign | exponent << F16_EXPONENT_SHIFT | mantissa\n else:\n f16 = sign\n return f16", "def addExponent(self):\n\t\t# if the exponent part is not set and this number is allowed an exponent\n\t\tif(self.exponent == None and self.allowExponent):\n\t\t\t# set the exponent to another number (disallowing exponents since we can't\n\t\t\t# have an exponent with an exponent\n\t\t\tself.exponent = Number(allowExponent = False)", "def encode_float(float_, length=None):\n\t\n\tif length not in (None, 0, 4, 8):\n\t\traise ValueError('Cannot encode floating point values with lengths other than 0, 4, or 8 bytes.')\n\tif float_ is None:\n\t\tfloat_ = 0.0\n\tif float_ == 0.0:\n\t\tif length is None:\n\t\t\tlength = 0\n\telse:\n\t\tif length is None:\n\t\t\tlength = 8\n\t\telif length == 0:\n\t\t\traise ValueError('Cannot encode floating point value %f as it would have an encoded representation longer than 0 bytes.' % float_)\n\t\n\tif length in (4, 8):\n\t\tdata = bytearray(struct.pack({\n\t\t\t4: '>f',\n\t\t\t8: '>d'\n\t\t}[length], float_))\n\telse:\n\t\tdata = bytearray()\n\t\n\treturn data", "def e_add(pub, a, b):\n return a * b % pub.n_sq", "def calculate_exponent():\n pass", "def float32_to_float8e5m2( # pylint: disable=too-many-statements\n fval: float,\n scale: float = 1.0,\n fn: bool = False,\n uz: bool = False,\n saturate: bool = True,\n) -> int:\n x = fval / scale\n b = int.from_bytes(struct.pack(\"<f\", np.float32(x)), \"little\")\n ret = (b & 0x80000000) >> 24 # sign\n\n if fn and uz:\n if (b & 0x7FC00000) == 0x7FC00000:\n return 0x80\n if (b & 0x7FFFFFFF) == 0x7F800000:\n # inf\n if saturate:\n return ret | 0x7F\n return 0x80\n e = (b & 0x7F800000) >> 23 # exponent\n m = b & 0x007FFFFF # mantissa\n\n if e != 0:\n if e < 109:\n pass\n elif e < 112:\n # denormalized number\n ex = e - 111\n if ex >= -1:\n ret |= 1 << (1 + ex)\n ret |= m >> (22 - ex)\n elif m > 0:\n ret |= 1\n mask = 1 << (21 - ex)\n if m & mask and ( # pylint: disable=too-many-boolean-expressions\n ret & 1\n or m & (mask - 1) > 0\n or (m & mask and m & (mask << 1) and m & (mask - 1) == 0)\n ):\n # rounding\n ret += 1\n elif e < 143:\n # normalized number\n ex = e - 111\n ret |= ex << 2\n ret |= m >> 21\n if m & 0x100000 and ((m & 0xFFFFF) or (m & 0x200000)):\n if (ret & 0x7F) < 0x7F:\n # rounding\n ret += 1\n elif not saturate:\n ret = 0x80\n elif e == 255 and m == 0: # inf\n ret = 0x80\n elif saturate:\n ret |= 0x7F # last possible number\n else:\n ret = 0x80\n elif m == 0:\n # -0\n ret = 0\n return int(ret)\n elif not fn and not uz:\n if (b & 0x7FC00000) == 0x7FC00000:\n return 0x7F | ret\n if np.isinf(x):\n if saturate:\n return 0x7B | ret\n return 0x7C | ret\n e = (b & 0x7F800000) >> 23 # exponent\n m = b & 0x007FFFFF # mantissa\n\n if e != 0:\n if e < 110:\n pass\n elif e < 113:\n # denormalized number\n ex = e - 112\n if ex >= -1:\n ret |= 1 << (1 + ex)\n ret |= m >> (22 - ex)\n elif m > 0:\n ret |= 1\n mask = 1 << (21 - ex)\n if m & mask and ( # pylint: 
disable=too-many-boolean-expressions\n ret & 1\n or m & (mask - 1) > 0\n or (m & mask and m & (mask << 1) and m & (mask - 1) == 0)\n ):\n # rounding\n ret += 1\n elif e < 143:\n # normalized number\n ex = e - 112\n ret |= ex << 2\n ret |= m >> 21\n if m & 0x100000 and ((m & 0xFFFFF) or (m & 0x200000)):\n if (ret & 0x7F) < 0x7B:\n # rounding\n ret += 1\n elif saturate:\n ret |= 0x7B\n else:\n ret |= 0x7C\n elif saturate:\n ret |= 0x7B\n else:\n ret |= 0x7C\n return int(ret)\n else:\n raise NotImplementedError(\"fn and uz must be both False or True.\")", "def _add_scalar(self, scalar):\n\n a, b = self, scalar\n\n # Don't bother to salt/obfuscate in a basic operation, do it\n # just before leaving the computer.\n encrypted_scalar = a.public_key.raw_encrypt(b, 1)\n\n sum_ciphertext = a._raw_add(a.ciphertext(False), encrypted_scalar)\n return EncryptedNumber(a.public_key, sum_ciphertext)", "def zzx_add_mul(f, g, h):\n return zzx_add(f, zzx_mul(g, h))", "def enc_x(self, inputs, training=False):\n return self._enc_x(inputs, training)", "def to_sign_exponent_mantissa(value, exponent_bits=exponent_bits, mantissa_bits=mantissa_bits):\n float_mantissa, float_exponent = math.frexp(value)\n if (float_mantissa >= 0):\n sign = 0\n else:\n sign = 1\n exponent = int(float_exponent + 2**(exponent_bits - 1))\n mantissa = int(abs(float_mantissa) * 2**mantissa_bits)\n return sign, exponent, mantissa", "def __float__(self):\n return float(self.encoded) / (1 << self.frac_bits)", "def enc_add_const(pub, m, c):\n # Similiar to enc add\n add_const_result = m * powmod(pub.g, c, pub.n_sq) % pub.n_sq\n return add_const_result", "def zzX_add_mul(f, g, h):\n return zzX_add(f, zzX_mul(g, h))", "def test_op_pow_scalar_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n s = 0.7\n\n old = numpy.empty_like(a)\n old[:] = a[:]\n expect = pow(a, s)\n\n offl_a = stream.bind(a)\n offl_r = pow(offl_a, s)\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old).all(),\n \"Input array operand must not be modified: \"\n \"{0} should be {1}\".format(a, old))\n self.assertEqualEpsilon(r, expect,\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def inverse_exponential(x):\n return math.exp(-x)", "def local_exp_over_1_plus_exp(node):\r\n # this optimization should be done for numerical stability\r\n # so we don't care to check client counts\r\n if node.op == tensor.true_div:\r\n\r\n #find all the exp() terms in the numerator\r\n num, denom = node.inputs\r\n num_exp_x, num_rest, num_neg = partition_num_or_denom(num, is_exp)\r\n denom_1pexp, denom_rest, \\\r\n denom_neg = partition_num_or_denom(denom, is_1pexp)\r\n\r\n sigmoids = []\r\n for t in denom_1pexp:\r\n if t in num_exp_x:\r\n # case: exp(x) /(1+exp(x))\r\n sigmoids.append(sigmoid(t))\r\n del num_exp_x[num_exp_x.index(t)]\r\n else:\r\n # case: 1/(1+exp(x))\r\n sigmoids.append(sigmoid(-t))\r\n\r\n if not sigmoids: # we didn't find any. 
abort\r\n return\r\n # put the new numerator together\r\n new_num = sigmoids + [tensor.exp(t) for t in num_exp_x] + num_rest\r\n if len(new_num) == 1:\r\n new_num = new_num[0]\r\n else:\r\n new_num = tensor.mul(*new_num)\r\n\r\n if num_neg ^ denom_neg:\r\n new_num = -new_num\r\n\r\n if len(denom_rest) == 0:\r\n return [new_num]\r\n elif len(denom_rest) == 1:\r\n return [new_num / denom_rest[0]]\r\n else:\r\n return [new_num / tensor.mul(*denom_rest)]" ]
[ "0.5811039", "0.57055616", "0.5681594", "0.5667057", "0.55601025", "0.5532706", "0.5469459", "0.5463289", "0.54342914", "0.5426036", "0.53601795", "0.5341179", "0.5299423", "0.52942103", "0.52717775", "0.5256707", "0.52175176", "0.5214884", "0.5214496", "0.51952034", "0.5158577", "0.50784475", "0.5069362", "0.50409806", "0.5040043", "0.50382435", "0.50273275", "0.50084466", "0.5002518", "0.5001642" ]
0.73143375
0
Paillier decryption of ciphertext into an IEEE754 binary64 float value.
def DecryptFloat(self, ciphertext): original_plaintext = self.Decrypt(ciphertext) plaintext = original_plaintext mantissa_and_exponent = plaintext & _ONES_FLOAT_SIGN_LOW_LSB plaintext >>= FLOAT_SIGN_LOW_LSB # >>= 831 sign_low32 = plaintext & 0xffffffff plaintext >>= 32 sign_high32 = plaintext & 0xffffffff plaintext >>= 32 # carry_over32 = plaintext & 0xffffffff plaintext >>= 32 minus_inf32 = plaintext & 0xffffffff plaintext >>= 32 plus_inf32 = plaintext & 0xffffffff plaintext >>= 32 nan_32 = plaintext & 0xffffffff if nan_32 > 0: return float('nan') # adding a +inf and -inf should return a nan if plus_inf32 > 0 and minus_inf32 > 0: return float('nan') if plus_inf32 > 0: return float('inf') if minus_inf32 > 0: return float('-inf') if sign_high32 == 0 and sign_low32 > 0: # This indicates that positive overflow has happened, mimic ieee float # behaviour and return +inf. return float('inf') if sign_high32 == 0xffffffff and sign_low32 < 0xffffffff: # This indicates that negative overflow has happened, mimic ieee float # behaviour and return -inf. return float('-inf') if sign_high32 == 0 and sign_low32 == 0: # positive finite number. if mantissa_and_exponent == 0L: return float(0) size = len(bin(mantissa_and_exponent)) - 2 # -2 to remove prepended 0b if size >= MANTISSA_BITS: # take the first 53 bits and remove the leading 1 bit i.e 52 bits. new_mantissa = ((mantissa_and_exponent >> (size - MANTISSA_BITS)) & 0xfffffffffffff) else: # take all the bits and shift left to make it a normal number, # the exponent also gets updated appropriately. new_mantissa = ((mantissa_and_exponent << (MANTISSA_BITS - size)) & 0xfffffffffffff) new_exponent = ((size - MANTISSA_BITS) - FLOAT_MANTISSA_ZERO + EXPONENT_BIAS) new_value = (new_exponent << EXPLICIT_MANTISSA_BITS) | new_mantissa return struct.unpack('d', struct.pack('Q', new_value))[0] if sign_high32 == 0xffffffff and sign_low32 == 0xffffffff: # negative finite number. # - first find the positive value of the number by taking the 2s # complement of the 895 bit integer. num = original_plaintext & _ONES_CARRYOVER_LSB positive_895bit_value = (num ^ _ONES_CARRYOVER_LSB) + 1L # - final value will mostly be a 831 bit number or smaller except if # 831 bits are all zero which represents -2^831 and gives a 2's complement # positive value of 2^831, we detect this case and return -inf. positive_832bit_value = positive_895bit_value & _ONES_832 if positive_832bit_value >> FLOAT_SIGN_LOW_LSB: # >> 831: return float('-inf') size = len(bin(positive_832bit_value)) - 2 if size >= MANTISSA_BITS: # take the first 53 bits and remove the leading 1 bit. new_mantissa = ((positive_832bit_value >> (size - MANTISSA_BITS)) & 0xfffffffffffff) else: # take all the bits and shift left to make it a normal number, # the exponent also gets updated appropriately. new_mantissa = ((positive_832bit_value << (MANTISSA_BITS - size)) & 0xfffffffffffff) new_exponent = ((size - MANTISSA_BITS) - FLOAT_MANTISSA_ZERO + EXPONENT_BIAS) new_value = ((new_exponent << EXPLICIT_MANTISSA_BITS) | new_mantissa | (1 << (EXPLICIT_MANTISSA_BITS + EXPONENT_BITS))) return struct.unpack('d', struct.pack('Q', new_value))[0] raise ValueError('Got an unusual decrypted value either nan, inf or sign ' 'bits aren\'t set correctly: %s' % hex(original_plaintext))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def EncryptFloat(self, plaintext, r_value=None):\n if not isinstance(plaintext, float):\n raise ValueError('Expected float plaintext but got: %s' % type(plaintext))\n\n input_as_long = struct.unpack('Q', struct.pack('d', plaintext))[0]\n mantissa = (input_as_long & 0xfffffffffffff) | 0x10000000000000\n exponent = ((input_as_long >> 52) & 0x7ff) - EXPONENT_BIAS\n sign = input_as_long >> (EXPLICIT_MANTISSA_BITS + EXPONENT_BITS)\n if IsNan(plaintext):\n # Put a 1 in the 32 bit nan indicator field.\n plaintext = 0x00000001 << FLOAT_NAN_LSB # << 991\n elif IsInfPlus(plaintext):\n # Put a 1 in the 32 bit plus inf indicator field.\n plaintext = 0x00000001 << FLOAT_PLUSINF_LSB # << 959\n elif IsInfMinus(plaintext):\n # Put a 1 in the 32 bit minus inf indicator field.\n plaintext = 0x00000001 << FLOAT_MINUSINF_LSB # << 927\n elif exponent == 0 and mantissa == 0: # explicit 0\n plaintext = 0\n elif exponent > FLOAT_MANTISSA_ZERO: # > 389\n # Can't represent such large numbers\n raise ValueError('Floats with exponents larger than 389 are currently '\n 'not suppported.')\n elif exponent < -FLOAT_MANTISSA_ZERO - EXPLICIT_MANTISSA_BITS: # < -389 -52\n # too small, set to zero\n plaintext = 0\n else: # representable numbers with -441 <= exponent <= 389.\n # Place 53 bit mantissa (1 + 52 explicit bit mantissa in 831 bit payload\n # and shift according to exponent.\n # - first put 53 bit mantissa on the left most side of payload\n plaintext = mantissa << FLOAT_MANTISSA_LSB # << 778\n # - second shift right as needed.\n plaintext >>= (FLOAT_MANTISSA_ZERO - exponent) # >>= (389 - exponent)\n # Find 2s complement if number is negative\n if sign == 1: # neg number\n # make 895 bit (831 + 64 extended sign bits) 2s complement\n plaintext = (plaintext ^ _ONES_CARRYOVER_LSB) + 1L\n return self.Encrypt(plaintext, r_value=r_value)", "def __float__(self):\n return float(self.encoded) / (1 << self.frac_bits)", "def decrypt(x, P, S):\n xL, xR = split64(x)\n xL, xR = feistel(xL, xR, tuple(reversed(P)), S)\n return merge64(xL, xR)", "def decrypt(key, ciphertext):\n data = fk(keyGen(key)[1], ip(ciphertext))\n return fp(fk(keyGen(key)[0], swapNibbles(data)))", "def Decrypt(self, ciphertext):\n if not isinstance(ciphertext, int) and not isinstance(ciphertext, long):\n raise ValueError('Expected int or long type ciphertext but got: %s' %\n type(ciphertext))\n u = ModExp(ciphertext, self.__lambda, self.nsquare)\n l_of_u = (u - 1) // self.n\n return (l_of_u * self.__mu) % self.n", "def decrypt(self,encr):\n return pow(b64toi(encr),self.kl.d,self.kl.n)>>self.s", "def fernet_decript(key,message):\n\tf = Fernet(key)\n\treturn f.decrypt(message)", "def raw_decrypt(self, ciphertext):\n if not isinstance(ciphertext, int) and not isinstance(ciphertext, type(mpz(1))) and not isinstance(scalar, numpy.int64):\n raise TypeError('Expected ciphertext to be an int, not: %s' %\n type(ciphertext))\n\n decrypt_to_p = self.l_function(powmod(ciphertext, self.p-1, self.psquare), self.p) * self.hp % self.p\n decrypt_to_q = self.l_function(powmod(ciphertext, self.q-1, self.qsquare), self.q) * self.hq % self.q\n value = self.crt(decrypt_to_p, decrypt_to_q)\n if value < self.n/3:\n return value\n else:\n return value - self.n", "def extract_ciphertext(self):\n return b64decode(self.cipherblock[\"ciphertext\"])", "def decrypt(self,e):\n if self._f is None or self._g is None:\n raise Exception(\"Private key not found.\")\n\n if e._N <= self._P.get_N():\n\n if not self._fp:\n self._fp = invert_in_p(self._f, self._P.get_N())\n if not self._fq:\n 
self._fq = invert_in2tor(self._f, self._P.get_N(), int(lg(self._P.get_q())))\n\n assert(self._h == self._fq * self._g)\n\n a = (self._f * e) % self._P.get_q()\n b = (self._fp * a) % self._P.get_p()\n\n return b # decrypted message\n else:\n raise Exception(\"e is too large, must be equal or under size %d\" % self._P.get_N())", "def decode(self, encoded_value):\n return float(encoded_value) / (1 << self.frac_bits)", "def decrypt(self, ciphertext):\n return self._transform(ciphertext, self._backward)", "def GetFloat(start, numBytes, ens):\n try:\n return struct.unpack(\"f\", ens[start:start + numBytes])[0]\n except Exception as e:\n logging.debug(\"Error creating a float from bytes. \" + str(e))\n return 0.0", "def val(self):\n return float(self.encoded) / (1 << self.frac_bits)", "def feistel_decrypt(ct_bin, key, rounds=2):\n dec_pairs = list(split_half(ct_bin))\n dec_key = proper_key(key, len(dec_pairs[0]))\n for i in reversed(range(1, rounds+1)):\n dec_pairs[0], dec_pairs[1] = xor_compare(dec_pairs[1], round_function(dec_pairs[0], dec_key, i)), dec_pairs[0]\n return ''.join(dec_pairs)", "def decrypt(self, buffer):\n try:\n ct = base64.b64decode(buffer)\n except:\n print('f a i l')\n return bytes('fail')\n\n cipher = AES.new(self.psk, AES.MODE_GCM, FIXED_IV)\n pt = unpad(cipher.decrypt(ct), AES.block_size)\n return pt", "def _decode_float(fp):\n return struct.unpack('>f', fp.read(4))[0]", "def decrypt(priv, pub, cipher):\n x = myExp(cipher, priv.lamb, pub.n_sq) - 1\n plain = ((x // pub.n) * priv.mu) % pub.n\n return plain", "def decrypt(self, data):", "def AES_decrypt(ciphertext: bytes) -> Text:\n text = b64decode(ciphertext)\n cipher = AES.new(secret_key, mode, IV)\n return Padding.unpad(cipher.decrypt(text), bs).decode('utf-8')", "def decrypt_pk(priv_key, ciphertext):\n try:\n plaintext = priv_key.decrypt(\n b64decode(ciphertext),\n padding.OAEP(\n mgf=padding.MGF1(algorithm=CryptoHash()),\n algorithm=CryptoHash(),\n label=None\n )\n )\n except UnsupportedAlgorithm as e:\n # a failure to dencrypt someone else's data is not typically a fatal\n # error, but in this particular case, the most likely cause of this\n # error is an old cryptography library\n logging.error(\"Fatal error: encryption hash {} unsupported, try upgrading to cryptography >= 1.4. 
Exception: {}\".format(\n CryptoHash, e))\n # re-raise the exception for the caller to handle\n raise e\n return plaintext", "def _eight_byte_real_to_float(value):\n short1, short2, long3 = struct.unpack(\">HHL\", value)\n exponent = (short1 & 0x7F00) // 256 - 64\n mantissa = (\n ((short1 & 0x00FF) * 65536 + short2) * 4294967296 + long3\n ) / 72057594037927936.0\n if short1 & 0x8000:\n return -mantissa * 16.0 ** exponent\n return mantissa * 16.0 ** exponent", "def bin_to_float64(b):\n bf = int_to_bytes(int(b, 2), 8) # 8 bytes needed for IEEE 754 binary64.\n return struct.unpack(\">d\", bf)[0]", "def _eight_byte_real_to_float(value):\n short1, short2, long3 = struct.unpack('>HHL', value)\n exponent = (short1 & 0x7f00) // 256 - 64\n mantissa = (((short1 & 0x00ff) * 65536 + short2) * 4294967296 +\n long3) / 72057594037927936.0\n if short1 & 0x8000:\n return -mantissa * 16.**exponent\n return mantissa * 16.**exponent", "def decrypt(self, b):\n decrypted = self.__aes.ecbDecrypt(b)\n return unpadPkcs7(decrypted, 16)", "def decrypt(priv_key, ciphertext):\n pk_encrypted_secret_key = ciphertext['pk_encrypted_secret_key']\n sym_encrypted_data = ciphertext['sym_encrypted_data']\n # TODO: secure delete\n secret_key = decrypt_pk(priv_key, pk_encrypted_secret_key)\n encoded_string = decrypt_symmetric(secret_key, sym_encrypted_data)\n return decode_data(encoded_string)", "def bin_to_float(b):\n bf = int_to_bytes(int(b, 2), 8) # 8 bytes needed for IEEE 754 binary64.\n return struct.unpack('>d', bf)[0]", "def decrypt(ciphertext):\n base_decode = {'16': base64.b16decode,\n '32': base64.b32decode, '64': base64.b64decode}\n cleartext = ciphertext+''\n for i in range(encrypt_times):\n cleartext = base_decode[get_base(cleartext)](cleartext)\n return cleartext", "def decrypt(ciphertext):\n # AES decrypt\n iv = ciphertext[:16]\n ciphertext = ciphertext[16:]\n aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return unpad(aes.decrypt(ciphertext))", "def _decrypt(self, b, strip_padding=True):\n from cryptography.hazmat.primitives.ciphers \\\n import Cipher, algorithms, modes\n from cryptography.hazmat.backends import default_backend\n\n backend = default_backend()\n cypher = Cipher(\n algorithms.AES(self.__key), modes.CBC(self.__iv), backend=backend)\n decryptor = cypher.decryptor()\n result = decryptor.update(b) + decryptor.finalize()\n if strip_padding:\n result = result[:-result[-1]]\n return result" ]
[ "0.6348259", "0.6295682", "0.6129", "0.60495037", "0.6032528", "0.6025274", "0.5985032", "0.5756852", "0.5723989", "0.5710238", "0.5611292", "0.5578155", "0.55487376", "0.55423474", "0.5519347", "0.5498408", "0.54967207", "0.5486184", "0.54766923", "0.5475583", "0.54647607", "0.5457564", "0.5438994", "0.5420953", "0.5397627", "0.53843415", "0.5370524", "0.53602374", "0.53302634", "0.53228563" ]
0.7470246
0
Returns the number of bytes in the Bignum.
def _NumBytesBn(bn): if not _FOUND_SSL: raise RuntimeError('Cannot evaluate _NumBytesBn because ssl library was ' 'not found') size_in_bits = ssl.BN_num_bits(bn) return int(math.ceil(size_in_bits / 8.0))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_bytes(self) -> str:\n return pulumi.get(self, \"num_bytes\")", "def size_bytes(self) -> int:\n return pulumi.get(self, \"size_bytes\")", "def _get_nr_of_bits(self):\n return sum(self._size_var)", "def lenb(self) -> int:\n return self._core.lenb()", "def size_bytes(self):\n size_words = self.size_words()\n if size_words is None:\n return None\n return size_words * intnat.sizeof", "def nbytes(self) -> int:\n return self._nbytes(False)", "def NumBits(self):\n num_bits = 8*len(self.output)\n if self.out_boff % 8:\n num_bits -= 8\n num_bits += self.out_boff\n if num_bits < 0:\n print \"What the...\"\n return num_bits", "def __len__(self):\r\n return numBits(self.n)", "def size_as_number_of_bits(size):\n\n if size == 0:\n return 0\n else:\n return len('{:b}'.format(size))", "def nbytes(self):\n # Equivalent to self.itemsize * self.size\n return self.initial_value.nbytes", "def nbytes(self):\n # type: () -> int\n size = 0\n for chunk in self.data.chunks:\n for buf in chunk.buffers():\n size += buf.size\n return size", "def nbytes(self):\n\n return self.data.type.datasize", "def totalbytes(self):\n with self.session as session:\n result = session.execute(select([func.sum(IndexRecord.size)])).scalar()\n if result is None:\n return 0\n return long(result)", "def bitSizeOf(self) -> int:\n\n return self._numBits", "def bitSizeOf(self) -> int:\n\n return self._numBits", "def total_bytes_to_process(self) -> float:\n return pulumi.get(self, \"total_bytes_to_process\")", "def number_of_bits(self):\n return self.numbits", "def numero_bin(self):\n return self._numero_bin", "def bitSizeOf() -> int:\n\n return 64", "def byte_size(self) -> int:\n return pixel_formats[self._dtype][3] * self._components * self.width * self.height", "def nbytes(self):\n # type: () -> int\n size = 0\n for chunk in self.data.chunks:\n for buf in chunk.buffers():\n if buf is not None:\n size += buf.size\n return size", "def num_long_term_bytes(self) -> str:\n return pulumi.get(self, \"num_long_term_bytes\")", "def size_in_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_in_gb\")", "def size_in_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_in_gb\")", "def size_in_gb(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"size_in_gb\")", "def nbytes(self):\n return self.nnz * self.dtype.itemsize", "def num_bytes(self):\n if self._num_bytes is None:\n status, info = self._resource._file.stat(\n timeout=(0 if self._timeout is None else self._timeout)\n )\n if not status.ok:\n raise OSError(\n \"\"\"XRootD error: {0}\nin file {1}\"\"\".format(\n status[\"message\"], self._file_path\n )\n )\n self._num_bytes = info.size\n\n return self._num_bytes", "def num_bytes(self):\n if self._num_bytes is None:\n status, info = self._resource._file.stat(\n timeout=(0 if self._timeout is None else self._timeout)\n )\n if not status.ok:\n raise OSError(\n \"\"\"XRootD error: {0}\nin file {1}\"\"\".format(\n status[\"message\"], self._file_path\n )\n )\n self._num_bytes = info.size\n\n return self._num_bytes", "def number_of_bits(self) -> int:\n raise NotImplementedError('To be Overidden by the derived class')", "def mbsize(x):\n return str(int(x / (1024 ** 2)))" ]
[ "0.76290935", "0.7256088", "0.7226742", "0.7202238", "0.69278276", "0.69196373", "0.684339", "0.6731235", "0.6718147", "0.6697299", "0.66349", "0.6583386", "0.65817887", "0.65500563", "0.65500563", "0.6529792", "0.6513275", "0.6505297", "0.64666075", "0.64448845", "0.64410853", "0.6417314", "0.64138556", "0.64138556", "0.64138556", "0.63985527", "0.63825756", "0.63825756", "0.6375262", "0.63554305" ]
0.7820821
0
Uses openssl, if available, to do a^b mod c where a,b,c are longs.
def ModExp(a, b, c): if not _FOUND_SSL: return pow(a, b, c) # convert arbitrary long args to bytes bytes_a = number.LongToBytes(a) bytes_b = number.LongToBytes(b) bytes_c = number.LongToBytes(c) # convert bytes to (pointer to) Bignums. bn_a = ssl.BN_bin2bn(bytes_a, len(bytes_a), 0) bn_b = ssl.BN_bin2bn(bytes_b, len(bytes_b), 0) bn_c = ssl.BN_bin2bn(bytes_c, len(bytes_c), 0) bn_result = ssl.BN_new() ctx = ssl.BN_CTX_new() # exponentiate and convert result to long ssl.BN_mod_exp(bn_result, bn_a, bn_b, bn_c, ctx) num_bytes_in_result = _NumBytesBn(bn_result) bytes_result = ctypes.create_string_buffer(num_bytes_in_result) ssl.BN_bn2bin(bn_result, bytes_result) long_result = number.BytesToLong(bytes_result.raw) # clean up ssl.BN_CTX_free(ctx) ssl.BN_free(bn_a) ssl.BN_free(bn_b) ssl.BN_free(bn_c) ssl.BN_free(bn_result) return long_result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def powMod(a,b,c):\n if GMPY:\n return(gmpy2.powmod(a,b,c))\n else:\n return pow(a,b,c)", "def TestSslRegression():\n if not _FOUND_SSL:\n return\n a = 13237154333272387305 # random\n b = 14222796656191241573 # random\n c = 14335739297692523692 # random\n expect_m = 10659231545499717801 # pow(a, b, c)\n m = ModExp(a, b, c)\n assert m == expect_m, 'TestSslRegression: unexpected ModExp result'", "def modExp(a, b, n):\n c = 0\n d = 1\n for bi in bin(b)[2:]:\n c = 2 * c\n d = (d * d) % n\n if bi == '1':\n c += 1\n d = (d * a) % n\n return d", "def powmod(self, a, c):\r\n a %= self.base\r\n res = 1\r\n\r\n for _ in range(c):\r\n res = (res * a) % self.base\r\n \r\n return res", "def modular_multiply(A, B, C):\n a_mod_c = A % C\n b_mod_c = B % C\n result = (a_mod_c * b_mod_c) % C\n return result", "def calculateCrypt(asci: int, e: int, n: int) -> int:\n return pow(int(asci),e,n)", "def exp_mod( a, b, n):\n r = int(1)\n while(b):\n if(b&1):\n r=(r*a)%n\n a=(a*a)%n\n b>>=1 # b = b>>1\n \n return r", "def endecrypt(x, e, c):\n\n return modulo(x, e, c)", "def square_and_multiply(x: int, exponent: int, modulus: int = None, Verbose: bool = False):\n b = bin(exponent).lstrip(\"0b\")\n r = 1\n for i in b:\n\n rBuffer = r\n r = r ** 2\n \n if i == \"1\":\n r = r * x\n if modulus:\n r %= modulus\n \n if Verbose:\n print(f\"{rBuffer}^2 = {r} mod {modulus}\")\n \n return r", "def encrypt(_g, _s, _e, _n, _m):\n\tr = gmpy2.xmpz(1)\n\tg = gmpy2.xmpz(_g)\n\ts = gmpy2.xmpz(_s)\n\te = gmpy2.xmpz(_e)\n\tn = gmpy2.xmpz(_n)\n\tm = gmpy2.xmpz(_m)\n\n\tb1 = f_mod(e, n)\n\tb1 = pow(g, pow(s, b1))\n\tb1 = mul(b1, f_mod(pow(r,m), pow(m,2)))\n\treturn b1", "def solveECDLP(a, b, orderP):\n a = int(a)\n b = int(b)\n res = None\n try:\n res = int(mod(-a*inverse_mod(b, orderP), orderP))\n except:\n pass\n return res", "def powermod(a, b, m):\n return mod(power(a, b), m)", "def mod_pow(a: int, b: int, m: int) -> int:\n\tres = 1\n\twhile b > 0:\n\t\tif b % 2 != 0:\n\t\t\tres = (res * a) % m\n\t\ta = (a * a) % m\n\t\tb //= 2\n\treturn res", "def exp_mod(a, b, nbr):\n bina = [int(x) for x in bin(a)[2:]]\n #binb = [int(x) for x in bin(b)[2:]]\n binn = [int(x) for x in bin(nbr)[2:]]\n #while len(binn)<len(bina):\n # binn = [0]+binn\n #print(bina, binn)\n binn.reverse()\n bina.reverse()\n n = len(bina)+len(binn)*4+1\n na = len(bina)\n nan = len(bina)+len(binn) # debut de Y\n nany = len(bina)+2*len(binn)+1 # debut de \"A\" (ici c'est b)\n nanya = len(bina)+3*len(binn)+1 # debut de \"APOW\" (ce qui doit etre mesuré)\n q = QuantumRegister(n+2, 'q') # +lost+lost2\n circ = QuantumCircuit(q)\n for i in range(na):\n if bina[i]:\n circ.x(q[i])\n for i in range(len(binn)):\n if binn[i]:\n circ.x(q[na+i])\n expmod(circ, q, # X, a, A, APOW, Y, n, N, binn, lost, lost2)\n [q[i] for i in range(len(bina))],\n b%nbr,\n [q[i+nany] for i in range(len(binn))],\n [q[i+nanya] for i in range(len(binn))],\n [q[i+nan] for i in range(len(binn)+1)],\n nbr,\n [q[i+na] for i in range(len(binn))],\n binn,\n q[n],\n q[n+1])\n if len(bina)%2:\n circ_m = measure(circ, q, [i for i in range(nan,nany)])\n else:\n circ_m = measure(circ, q, [i for i in range(nanya,n)])\n #circ_m = measure(circ, q, [i for i in range(n)])\n return circ_m", "def powmod(b,e,n):\r\n\treturn power_mod(b,e,n)", "def modinv(a, b):\n g, x, _ = xgcd(a, b)\n\n if g == 1:\n return x % b\n else:\n raise Exception('modular inverse does not exist')", "def powmod(b,e,n):\n\treturn power_mod(b,e,n)", "def c_mul(self, a, b):\n return ((int(a) * int(b)) & 0xFFFFFFFF)", "def power_modulo(a: int, b: 
int, n: int) -> int:\n result = 1\n \n # Loop through all the binary digits of the numbers\n while b != 0:\n\n if b % 2 == 1:\n # b odd\n result = (result * a) % n\n # result = (result * (a % n)) % n\n\n a = (a * a) % n\n # a = ((a % n) * (a % n)) % n\n\n b //= 2\n\n return result", "def mod_exp(a, b, n):\n result = 1\n while True:\n if b % 2 == 1:\n result = (a * result) % n\n\n b = b / 2\n\n if b == 0:\n break\n\n a = (a * a) % n\n\n return result", "def myHash(string, base=91, mod=1000000321):\n value = 0\n for pos, elem in enumerate(string[::-1]): # считаем значение полинома\n value += ord(elem) * base**pos # в последней задаче сделано с помощью массива (динамика)\n return value % mod", "def modExponent(self, base, power):\n result = 1\n power = int(power)\n base = base % self.mod\n while power > 0:\n if power & 1:\n # self.modReduce(result * base)\n result = result * base % self.mod\n base = base * base % self.mod # self.modReduce(base * base)\n power = power >> 1\n return result", "def inverseMod(a,b):\n if GMPY:\n return int(gmpy2.invert(a,b))\n else:\n gcd, x, y = computeGCD(a, m)\n if gcd != 1:\n None # there is no inverse of a mod b\n else:\n return x % m", "def mulinv(a, b):\n g, x, _ = xgcd(a, b)\n if g == 1:\n return x % b", "def enc_mul_const(pub, m, c):\n mul_result = powmod(m, c, pub.n_sq)\n return mul_result", "def ModExp(n, k, m):\n a = list(bin(k))[2:]\n a.reverse()\n s = 1\n for i in a:\n if i == '1':\n s = (s * n) % m\n n = (n * n) % m\n return s", "def _raw_mul(self, plaintext):\n if not isinstance(plaintext, int) and not isinstance(plaintext, type(mpz(1))) and not isinstance(plaintext, numpy.int64):\n raise TypeError('Expected ciphertext to be int, not %s' %\n type(plaintext))\n\n if plaintext < 0 or plaintext >= self.public_key.n:\n raise ValueError('Scalar out of bounds: %i' % plaintext)\n\n if self.public_key.n - self.public_key.max_int <= plaintext:\n # Very large plaintext, play a sneaky trick using inverses\n neg_c = invert(self.ciphertext(False), self.public_key.nsquare)\n neg_scalar = self.public_key.n - plaintext\n return powmod(neg_c, neg_scalar, self.public_key.nsquare)\n else:\n return powmod(self.ciphertext(False), plaintext, self.public_key.nsquare)", "def mult_mod(a, b, nbr, control):\n bina = [int(x) for x in bin(a)[2:]]\n # binb = [int(x) for x in bin(b)[2:]]\n binn = [int(x) for x in bin(nbr)[2:]]\n while len(binn) < len(bina):\n binn = [0]+binn\n # print(bina, binn)\n binn.reverse()\n bina.reverse()\n n = len(bina)+len(binn)*3+1\n na = len(bina)\n nan = len(bina)+len(binn) # debut de Y\n nany = len(bina)+2*len(binn)+1 # debut de \"A\" (ici c'est b)\n q = QuantumRegister(n+2+1, 'q') # +lost+lost2+control\n circ = QuantumCircuit(q)\n for i in range(na):\n if bina[i]:\n circ.x(q[i])\n for i in range(len(binn)):\n if binn[i]:\n circ.x(q[na+i])\n if control:\n circ.x(q[n+2])\n cmultmod(circ, q, # control, X, a, A, Y, n, N, binn, lost, lost2\n q[n+2],\n [q[i] for i in range(len(bina))],\n b,\n [q[i+nany] for i in range(len(binn))],\n [q[i+nan] for i in range(len(binn)+1)],\n nbr,\n [q[i+na] for i in range(len(binn))],\n binn,\n q[n],\n q[n+1])\n circ_m = measure(circ, q, [i for i in range(nan,nany)])\n return circ_m", "def mod(a: Decimal, b: Decimal) -> Decimal:\n return a % b", "def test_mulmod(self):\n from manticore.platforms import evm\n from manticore.core.smtlib import ConstraintSet, Z3Solver, Operators\n\n constraints = ConstraintSet()\n\n address = 0x41414141414141414141\n data = b\"\"\n caller = 0x42424242424242424242\n value = 0\n bytecode = 
\"\"\n vm = evm.EVM(constraints, address, data, caller, value, bytecode, gas=23000)\n\n self.assertEqual(vm.MULMOD(12323, 2343, 20), 9)\n self.assertEqual(vm.MULMOD(12323, 2343, 0), 0)\n\n A, B, C = (\n 110427941548649020598956093796432407239217743554726184882600387580788736,\n 1048576,\n 4194319,\n )\n self.assertEqual(vm.MULMOD(A, B, C), 2423129)\n a, b, c = (\n constraints.new_bitvec(256),\n constraints.new_bitvec(256),\n constraints.new_bitvec(256),\n )\n constraints.add(a == A)\n constraints.add(b == B)\n constraints.add(c == C)\n result = vm.MULMOD(a, b, c)\n # 0x8000000000000000000000000000000000000000000000000000000082000011\n self.assertEqual(Z3Solver.instance().get_all_values(constraints, result), [2423129])" ]
[ "0.6681694", "0.64587593", "0.6410224", "0.6405954", "0.62579936", "0.62428784", "0.6195865", "0.61904657", "0.6125475", "0.6122947", "0.6103147", "0.6046036", "0.6010627", "0.60101646", "0.5995779", "0.5965606", "0.5963928", "0.5952755", "0.59174013", "0.5871228", "0.58641744", "0.58526343", "0.58261245", "0.579913", "0.5794428", "0.57839745", "0.57826495", "0.57774776", "0.57667786", "0.5738667" ]
0.7635855
0
Test the openssl BN functions' ctypes setup for regressions.
def TestSslRegression(): if not _FOUND_SSL: return a = 13237154333272387305 # random b = 14222796656191241573 # random c = 14335739297692523692 # random expect_m = 10659231545499717801 # pow(a, b, c) m = ModExp(a, b, c) assert m == expect_m, 'TestSslRegression: unexpected ModExp result'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_ssl_default(self):\n e = ErrataConnector()\n assert e.ssl_verify", "def test_ssl_default(self):\n assert security.security_settings.ssl_verify()", "def test_alpn_call_failure(self):\n context = Context(SSLv23_METHOD)\n with pytest.raises(ValueError):\n context.set_alpn_protos([])", "def test_init(self):\n DummyCryptographicObject()", "def test_ssl_ssr_kw(self):\r\n x = array(\r\n [75, 67, 70, 75, 65, 71, 67, 67, 76, 68, 57, 58, 60, 59, 62, 60, 60, 57, 59, 61,\r\n 58, 61, 56, 58, 57, 56, 61, 60, 57, 58, 58, 59, 58, 61, 57, 56, 58, 57, 57, 59, 62, 66,\r\n 65, 63, 64, 62, 65, 65, 62, 67])\r\n obs_ssl, obs_ssr, obs_sx = ssl_ssr_sx(x)\r\n exp_ssl = array(\r\n [47, 40, 45, 47, 35, 46, 40, 40, 49, 44, 3, 10, 21, 17, 29, 21, 21, 3, 17,\r\n 25, 10, 25, 0, 10, 3, 0, 25, 21, 3, 10, 10, 17, 10, 25, 3, 0, 10, 3, 3, 17, 29, 39, 35, 33,\r\n 34, 29, 35, 35, 29, 40])\r\n exp_ssr = array(\r\n [49, 44, 46, 49, 39, 47, 44, 44, 50, 45, 10, 17, 25, 21, 33, 25, 25, 10,\r\n 21, 29, 17, 29, 3, 17, 10, 3, 29, 25, 10, 17, 17, 21, 17, 29, 10, 3, 17, 10, 10, 21, 33,\r\n 40, 39, 34, 35, 33, 39, 39, 33, 44])\r\n exp_sx = array(\r\n [56, 56, 56, 57, 57, 57, 57, 57, 57, 57, 58, 58, 58, 58, 58, 58, 58, 59,\r\n 59, 59, 59, 60, 60, 60, 60, 61, 61, 61, 61, 62, 62, 62, 62, 63, 64, 65, 65, 65, 65, 66,\r\n 67, 67, 67, 67, 68, 70, 71, 75, 75, 76])\r\n self.assertEqual(obs_ssl, exp_ssl)\r\n self.assertEqual(obs_ssr, exp_ssr)\r\n self.assertEqual(obs_sx, exp_sx)", "def __init__(self, lib: ctypes.CDLL, seed: int) -> None:\n self.stanlib = lib\n\n construct = self.stanlib.bs_rng_construct\n construct.restype = ctypes.c_void_p\n construct.argtypes = [ctypes.c_uint, star_star_char]\n self.ptr = construct(seed, None)\n\n if not self.ptr:\n raise RuntimeError(\"Failed to construct RNG.\")\n\n self._destruct = self.stanlib.bs_rng_destruct\n self._destruct.restype = None\n self._destruct.argtypes = [ctypes.c_void_p]", "def initialize_libca():\n if 'EPICS_CA_MAX_ARRAY_BYTES' not in os.environ:\n os.environ['EPICS_CA_MAX_ARRAY_BYTES'] = \"%i\" % 2**24\n\n dllname = find_libca()\n load_dll = ctypes.cdll.LoadLibrary\n global libca, initial_context\n if os.name == 'nt':\n load_dll = ctypes.windll.LoadLibrary\n try:\n libca = load_dll(dllname)\n except:\n raise ChannelAccessException('loading Epics CA DLL failed')\n\n ca_context = {False:0, True:1}[PREEMPTIVE_CALLBACK]\n ret = libca.ca_context_create(ca_context)\n if ret != dbr.ECA_NORMAL:\n raise ChannelAccessException('cannot create Epics CA Context')\n\n # set argtypes and non-default return types\n # for several libca functions here\n libca.ca_pend_event.argtypes = [ctypes.c_double]\n libca.ca_pend_io.argtypes = [ctypes.c_double]\n libca.ca_client_status.argtypes = [ctypes.c_void_p, ctypes.c_long]\n libca.ca_sg_block.argtypes = [ctypes.c_ulong, ctypes.c_double]\n\n libca.ca_current_context.restype = ctypes.c_void_p\n libca.ca_version.restype = ctypes.c_char_p\n libca.ca_host_name.restype = ctypes.c_char_p\n libca.ca_name.restype = ctypes.c_char_p\n libca.ca_message.restype = ctypes.c_char_p\n\n # save value offests used for unpacking\n # TIME and CTRL data as an array in dbr module\n dbr.value_offset = (39*ctypes.c_short).in_dll(libca,'dbr_value_offset')\n initial_context = current_context()\n\n if AUTO_CLEANUP:\n atexit.register(finalize_libca)\n return libca", "def test_ssl_env( # noqa: C901 # FIXME\n thread_exceptions,\n recwarn,\n mocker,\n http_request_timeout,\n tls_http_server, adapter_type,\n ca, tls_verify_mode, tls_certificate,\n tls_certificate_chain_pem_path,\n 
tls_certificate_private_key_pem_path,\n tls_ca_certificate_pem_path,\n use_client_cert,\n):\n interface, _host, port = _get_conn_data(ANY_INTERFACE_IPV4)\n\n with mocker.mock_module.patch(\n 'idna.core.ulabel',\n return_value=ntob('127.0.0.1'),\n ):\n client_cert = ca.issue_cert(ntou('127.0.0.1'))\n\n with client_cert.private_key_and_cert_chain_pem.tempfile() as cl_pem:\n tls_adapter_cls = get_ssl_adapter_class(name=adapter_type)\n tls_adapter = tls_adapter_cls(\n tls_certificate_chain_pem_path,\n tls_certificate_private_key_pem_path,\n )\n if adapter_type == 'pyopenssl':\n tls_adapter.context = tls_adapter.get_context()\n tls_adapter.context.set_verify(\n _stdlib_to_openssl_verify[tls_verify_mode],\n lambda conn, cert, errno, depth, preverify_ok: preverify_ok,\n )\n else:\n tls_adapter.context.verify_mode = tls_verify_mode\n\n ca.configure_trust(tls_adapter.context)\n tls_certificate.configure_cert(tls_adapter.context)\n\n tlswsgiserver = tls_http_server((interface, port), tls_adapter)\n\n interface, _host, port = _get_conn_data(tlswsgiserver.bind_addr)\n\n resp = requests.get(\n 'https://' + interface + ':' + str(port) + '/env',\n timeout=http_request_timeout,\n verify=tls_ca_certificate_pem_path,\n cert=cl_pem if use_client_cert else None,\n )\n\n env = json.loads(resp.content.decode('utf-8'))\n\n # hard coded env\n assert env['wsgi.url_scheme'] == 'https'\n assert env['HTTPS'] == 'on'\n\n # ensure these are present\n for key in {'SSL_VERSION_INTERFACE', 'SSL_VERSION_LIBRARY'}:\n assert key in env\n\n # pyOpenSSL generates the env before the handshake completes\n if adapter_type == 'pyopenssl':\n return\n\n for key in {'SSL_PROTOCOL', 'SSL_CIPHER'}:\n assert key in env\n\n # client certificate env\n if tls_verify_mode == ssl.CERT_NONE or not use_client_cert:\n assert env['SSL_CLIENT_VERIFY'] == 'NONE'\n else:\n assert env['SSL_CLIENT_VERIFY'] == 'SUCCESS'\n\n with open(cl_pem, 'rt') as f:\n assert env['SSL_CLIENT_CERT'] in f.read()\n\n for key in {\n 'SSL_CLIENT_M_VERSION', 'SSL_CLIENT_M_SERIAL',\n 'SSL_CLIENT_I_DN', 'SSL_CLIENT_S_DN',\n }:\n assert key in env\n\n # builtin ssl environment generation may use a loopback socket\n # ensure no ResourceWarning was raised during the test\n if IS_PYPY:\n # NOTE: PyPy doesn't have ResourceWarning\n # Ref: https://doc.pypy.org/en/latest/cpython_differences.html\n return\n for warn in recwarn:\n if not issubclass(warn.category, ResourceWarning):\n continue\n\n # the tests can sporadically generate resource warnings\n # due to timing issues\n # all of these sporadic warnings appear to be about socket.socket\n # and have been observed to come from requests connection pool\n msg = str(warn.message)\n if 'socket.socket' in msg:\n pytest.xfail(\n '\\n'.join((\n 'Sometimes this test fails due to '\n 'a socket.socket ResourceWarning:',\n msg,\n )),\n )\n pytest.fail(msg)\n\n # to perform the ssl handshake over that loopback socket,\n # the builtin ssl environment generation uses a thread\n for _, _, trace in thread_exceptions:\n print(trace, file=sys.stderr)\n assert not thread_exceptions, ': '.join((\n thread_exceptions[0][0].__name__,\n thread_exceptions[0][1],\n ))", "def _hinit(self, c):\n c.setopt(pycurl.SSL_VERIFYPEER, 0) # FIXME\n c.setopt(pycurl.SSL_VERIFYHOST, 1)", "def build_tls_gtirb() -> gtirb.IR:\n (ir, module) = gth.create_test_module(\n gtirb.Module.FileFormat.ELF,\n gtirb.Module.ISA.X64,\n )\n (text_section, text_bi) = gth.add_text_section(module)\n\n _, got = gth.add_section(module, \".got\")\n got_data_block = gth.add_data_block(got, 
b\"\\x00\\x00\\x00\\x00\")\n\n symbol_proxy = gth.add_symbol(\n module, \"__lib_value\", gth.add_proxy_block(module)\n )\n\n symbol_got = gth.add_symbol(module, \".L_1abc0\", got_data_block)\n\n se_symbol_got = gtirb.SymAddrConst(\n 0,\n symbol_got,\n {\n gtirb.SymbolicExpression.Attribute.GOT,\n gtirb.SymbolicExpression.Attribute.TPOFF,\n },\n )\n\n # For the following code:\n # 48 8b 05 00 00 00 00 mov __lib_value@GOTTPOFF(%rip), %rax\n # 48 31 c0 xor %rax,%rax\n # 48 c7 c0 3c 00 00 00 mov $0x3c,%rax\n # 48 31 ff xor %rdi,%rdi\n # 0f 05 syscall\n cb = gth.add_code_block(\n text_bi,\n b\"\\x48\\x8b\\x05\\x00\\x00\\x00\\x00\"\n b\"\\x48\\x31\\xc0\"\n b\"\\x48\\xc7\\xc0\\x3c\\x00\\x00\\x00\"\n b\"\\x48\\x31\\xff\"\n b\"\\x0f\\x05\",\n {3: se_symbol_got},\n )\n symbol_start = gth.add_symbol(module, \"_start\", cb)\n\n module.aux_data[\"libraries\"].data.append(\"libvalue.so\")\n\n module.aux_data[\"elfSymbolInfo\"].data[symbol_start.uuid] = (\n 0,\n \"FUNC\",\n \"GLOBAL\",\n \"DEFAULT\",\n 0,\n )\n\n module.aux_data[\"elfSymbolInfo\"].data[symbol_proxy.uuid] = (\n 0,\n \"TLS\",\n \"GLOBAL\",\n \"DEFAULT\",\n 0,\n )\n\n module.aux_data[\"elfSymbolInfo\"].data[symbol_got.uuid] = (\n 0,\n \"NONE\",\n \"LOCAL\",\n \"DEFAULT\",\n 0,\n )\n\n module.aux_data[\"elfSymbolVersions\"] = gtirb.AuxData(\n type_name=(\n \"tuple<mapping<uint16_t,tuple<sequence<string>,uint16_t>>,\"\n \"mapping<string,mapping<uint16_t,string>>,\"\n \"mapping<UUID,tuple<uint16_t,bool>>>\"\n ),\n data=(\n # ElfSymVerDefs\n {},\n # ElfSymVerNeeded\n {\"libvalue.so\": {1: \"LIBVALUE_1.0\"}},\n # ElfSymbolVersionsEntries\n {symbol_proxy.uuid: (1, False)},\n ),\n )\n\n module.aux_data[\"symbolForwarding\"].data[symbol_got.uuid] = symbol_proxy\n\n return ir", "def setUpModule():\n print(\"In setUpModule()...\")\n global math_obj\n math_obj = mymathlib()", "def patch_crypto_be_discovery():\n\n from cryptography.hazmat import backends\n\n try:\n from cryptography.hazmat.backends.commoncrypto.backend import \\\n backend as be_cc\n except ImportError:\n be_cc = None\n\n try:\n from cryptography.hazmat.backends.openssl.backend import \\\n backend as be_ossl\n except ImportError:\n be_ossl = None\n\n backends._available_backends_list = [\n be for be in (be_cc, be_ossl) if be is not None\n ]", "def get_rsa_asymn_keys(public_exponent = 65537, key_size = 2048, bc = backend):\n\tprivate_key = asymmetric.rsa.generate_private_key(public_exponent = public_exponent, key_size = key_size, backend = bc)\n\treturn private_key,private_key.public_key()", "def test_OPENSSL_VERSION_NUMBER(self):\n assert isinstance(OPENSSL_VERSION_NUMBER, int)", "def setup_ctypes():\n lib.createComplex.argtypes = [ctypes.c_double, ctypes.c_double]\n lib.createComplex.restype = ctypes.c_void_p\n\n lib.deleteComplex.argypes = [ctypes.c_void_p]\n lib.deleteComplex.restype = None\n\n lib.getRealPart.argypes = [ctypes.c_void_p]\n lib.getRealPart.restype = ctypes.c_double\n\n lib.getImaginaryPart.argypes = [ctypes.c_void_p]\n lib.getImaginaryPart.restype = ctypes.c_double\n\n lib.add.argypes = [ctypes.c_void_p, ctypes.c_void_p]\n lib.add.restype = ctypes.c_void_p\n\n lib.equals.argtypes = [ctypes.c_void_p, ctypes.c_void_p]\n lib.equals.restype = ctypes.c_bool", "def contract_pepo_pbc():\n pass", "def test_dig_sig(self):\n\n for using in [HashTypes.SHA1, HashTypes.SHA2, ]:\n self.do_test_dig_sig(using)", "def test_cgc_random_syscall_handling_native_interface():\n\n binary = os.path.join(bin_location, \"tests\", \"cgc\", \"KPRCA_00011\")\n pov_file = os.path.join(bin_location, 
\"tests_data\", \"cgc_povs\", \"KPRCA_00011_POV_00000.xml\")\n output_file = os.path.join(bin_location, \"tests_data\", \"cgc_povs\", \"KPRCA_00011_stdout.txt\")\n add_options = {\n angr.options.UNICORN_HANDLE_CGC_RECEIVE_SYSCALL,\n angr.options.UNICORN_HANDLE_CGC_RANDOM_SYSCALL,\n angr.options.UNICORN_HANDLE_SYMBOLIC_ADDRESSES,\n angr.options.UNICORN_HANDLE_SYMBOLIC_CONDITIONS,\n angr.options.UNICORN_HANDLE_SYMBOLIC_SYSCALLS,\n }\n\n rand_syscall_data = {\n \"random\": [\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n (65, 1),\n (16705, 2),\n (16705, 2),\n ]\n }\n with open(output_file, \"rb\") as fh:\n output_bytes = fh.read()\n\n trace_cgc_with_pov_file(\n binary,\n \"tracer_cgc_receive_unicorn_native_interface_rx_bytes\",\n pov_file,\n output_bytes,\n add_options=add_options,\n syscall_data=rand_syscall_data,\n )", "def setup_lib():\n global _lib\n if not _lib:\n for name in [ find_library(\"http_parser\"), \"libhttp_parser.so\", \"libhttp_parser\" ]:\n try:\n _lib = cdll.LoadLibrary(name)\n except OSError as e:\n pass\n else:\n break\n if not _lib:\n _lib = cdll.http_parser # propogate OSError if this fails\n\n _lib.http_parser_version.argtypes = []\n _lib.http_parser_version.restype = c_ulong\n\n _lib.http_parser_settings_init.argtypes = [c_void_p]\n _lib.http_parser_settings_init.restype = None\n\n _lib.http_parser_execute.argtypes = [c_void_p, c_void_p, POINTER(c_char), c_size_t]\n _lib.http_parser_execute.restype = c_size_t\n\n _lib.http_should_keep_alive.argtypes = [c_void_p]\n _lib.http_should_keep_alive.restype = c_int\n\n _lib.http_method_str.argtypes = [c_int]\n _lib.http_method_str.restype = c_char_p\n\n _lib.http_status_str.argtypes = [c_int]\n _lib.http_status_str.restype = c_char_p\n\n _lib.http_errno_name.argtypes = [c_int]\n _lib.http_errno_name.restype = c_char_p\n\n _lib.http_errno_description.argtypes = [c_int]\n _lib.http_errno_description.restype = c_char_p\n\n _lib.http_parser_url_init.argtypes = [c_void_p]\n _lib.http_parser_url_init.restype = None\n\n _lib.http_parser_parse_url.argtypes = [POINTER(c_char), c_size_t, c_int, c_void_p]\n _lib.http_parser_parse_url.restype = c_int\n\n _lib.http_parser_pause.argtypes = [c_void_p, c_int]\n _lib.http_parser_pause.restype = None\n\n _lib.http_body_is_final.argtypes = [c_void_p]\n _lib.http_body_is_final.restype = c_int\n\n _lib.http_parser_set_max_header_size.argtypes = [c_int]\n _lib.http_parser_set_max_header_size.restype = None\n\n return _lib", "def test_ssl_object_attributes(self) -> None:\n self.start_dummy_server()\n\n sock = socket.create_connection((self.host, self.port))\n with SSLTransport(\n sock, self.client_context, server_hostname=\"localhost\"\n ) as ssock:\n cipher = ssock.cipher()\n assert type(cipher) == tuple\n\n # No chosen protocol through 
ALPN or NPN.\n assert ssock.selected_alpn_protocol() is None\n assert ssock.selected_npn_protocol() is None\n\n shared_ciphers = ssock.shared_ciphers()\n # SSLContext.shared_ciphers() changed behavior completely in a patch version.\n # See: https://github.com/python/cpython/issues/96931\n assert shared_ciphers is None or (\n type(shared_ciphers) is list and len(shared_ciphers) > 0\n )\n\n assert ssock.compression() is None\n\n validate_peercert(ssock)\n\n ssock.send(sample_request())\n response = consume_socket(ssock)\n validate_response(response)", "def test_config_have_biopython():\n assert core.HAVE_BIOPYTHON\n args = Namespace(extended_validation='all')\n config = core.Config.from_args(args)\n assert config.extended_validation == 'all'", "def init():\n \n # ensure that required libraries are loaded\n LoadLibrary = ctypes.windll.kernel32.LoadLibraryA\n LoadLibrary(\"kernel32.dll\")\n\n hooks = [ (\"kernel32.dll\", \"LoadLibraryA\", LoadLibraryA_handler),\n (\"kernel32.dll\", \"GetProcAddress\", GetProcAddress_handler),\n (\"kernel32.dll\", \"malloc\", malloc_handler),\n (\"kernel32.dll\", \"memset\", memset_handler),\n (\"kernel32.dll\", \"ReadProcessMemory\", ReadProcessMemory_handler),\n (\"kernel32.dll\", \"VirtualAllocEx\", VirtualAllocEx_handler),\n (\"kernel32.dll\", \"WriteProcessMemory\", \\\n WriteProcessMemory_handler),\n ]\n\n for (dll_name, func_name, handler) in hooks:\n if not pybox.register_hook(dll_name,\n func_name,\n handler):\n logging.error(\"Failed to register hook for %s\" % func_name)\n \n return", "def initialize_ssl(self):\n self.ssl_context = ssl.SSLContext()\n # if self.config.get('ca_file', None):\n # self.ssl_context.load_verify_locations(ca_file=self.config['ca_file'])\n\n # TODO : Remove this\n\n verify_ssl = self.config[\"AUTH\"][\"verify_ssl\"]\n if isinstance(verify_ssl, str):\n verify_ssl = strtobool(verify_ssl)\n\n if not verify_ssl:\n self.ssl_context.verify_mode = ssl.CERT_NONE", "def test_main():\n\n encoder = HttpMoleCryptoEncoder('foobar.org', 'foobar')\n decoder = HttpMoleCryptoEncoder('foobar.org', 'foobar')\n retc = cb.mole.test.test_encoder(encoder, decoder=decoder)\n\n if retc == 0:\n print \"NO FAILURES / INCONCLUSIVE\"\n return retc", "def test_get_cipher_bits_before_connect(self):\n ctx = Context(SSLv23_METHOD)\n conn = Connection(ctx, None)\n assert conn.get_cipher_bits() is None", "def init_rp_library():\n retval = RP_LIB.rp_Init()\n if retval != 0:\n LOG.error(\"Failed to initialize lockbox library. 
Error code: %s\", ERROR_CODES[retval])\n sys.exit(-1)", "def setUp(self):\n self.crypt = Crypt()", "def test_blind_sig(self):\n # See page 127 of the paper\n # (1) Initialization\n signer_obj = ECCBlind()\n point_r = signer_obj.signer_init()\n self.assertEqual(len(signer_obj.pubkey()), 35)\n\n # (2) Request\n requester_obj = ECCBlind(pubkey=signer_obj.pubkey())\n # only 64 byte messages are planned to be used in Bitmessage\n msg = os.urandom(64)\n msg_blinded = requester_obj.create_signing_request(point_r, msg)\n self.assertEqual(len(msg_blinded), 32)\n\n # check\n self.assertNotEqual(sha256(msg).digest(), msg_blinded)\n\n # (3) Signature Generation\n signature_blinded = signer_obj.blind_sign(msg_blinded)\n assert isinstance(signature_blinded, str)\n self.assertEqual(len(signature_blinded), 32)\n\n # (4) Extraction\n signature = requester_obj.unblind(signature_blinded)\n assert isinstance(signature, str)\n self.assertEqual(len(signature), 65)\n\n self.assertNotEqual(signature, signature_blinded)\n\n # (5) Verification\n verifier_obj = ECCBlind(pubkey=signer_obj.pubkey())\n self.assertTrue(verifier_obj.verify(msg, signature))", "def setup_lib(CLIB):\n # {{ SETUP_LIB }}", "def boost_initialization():\n global Lib_c \n Lib_c = ctypes.CDLL('./integral_function.so')\n Lib_c.set.restype = None\n Lib_c.set.argtypes = (ctypes.c_int, ctypes.c_void_p, ctypes.c_void_p)\n Lib_c.set_target.restype = None\n Lib_c.set_target.argtypes = (ctypes.c_int,)\n Lib_c.function.restype = ctypes.c_double\n Lib_c.function.argtypes = (ctypes.c_int,ctypes.c_double)" ]
[ "0.58895886", "0.54251087", "0.5340569", "0.52410597", "0.51989615", "0.5001536", "0.4908221", "0.48994425", "0.4890942", "0.4880682", "0.48786157", "0.48503911", "0.4837446", "0.4806173", "0.48055226", "0.47952127", "0.479222", "0.47784704", "0.47668955", "0.47659346", "0.47639802", "0.4748605", "0.47439075", "0.47297525", "0.47172403", "0.471581", "0.47153515", "0.47040865", "0.47032806", "0.47023645" ]
0.5731037
1
Overwrite the biothings query handler to add a graphml format (&format=graphml). Added &download=True to download the .graphml file automatically; this can be disabled (&download=False).
def write(self, chunk): try: if self.format == "graphml": chunk = edges2graphml(chunk, self.request.uri, self.request.protocol, self.request.host, edge_default="directed") self.set_header("Content-Type", "text/graphml; charset=utf-8") if self.args.download: self.set_header('Content-Disposition', 'attachment; filename="smartapi_metakg.graphml"') return super(BaseAPIHandler, self).write(chunk) except Exception as exc: logger.warning(exc) super().write(chunk)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download(self, outputfile:str, **format_options) -> str:\n return self.connection.download(self.graph, outputfile, format_options)", "def download(self, outputfile:str, **format_options) -> str:\n return self.session.download(self.graph, outputfile, format_options)", "def ExportMetaGraph(prefix=''):\n global option\n option['export_meta_graph'] = prefix", "def getGraphURI(self, data):\n if data['newgraph']:\n return URIRef(data['newgraph'])\n if data['graph'][0]:\n return URIRef(data['graph'][0])\n return URIRef(data['downloadurl'])\n #3. check graph namespace?\n # e.g.: - check for owl:Ontology:about ???\n # - sorted([(len(x), x) for x in g.subjects()])[0]", "def generate_graphml_output(self, path):\n self.restructure_edge_info()\n self.restructure_node_info()\n return nx.write_graphml(self.G, path)", "def toGML(self):\n raise NotImplementedError", "def gexf_graph():\n # you must replace these lines and supply your own graph\n \n \n \n my_gexf = Gexf(\"JiajiaXie\", \"My awesome graph\")\n graph=my_gexf.addGraph(\"undirected\", \"static\", \"My awesome networks\")\n \n atr1=graph.addNodeAttribute('Type',type='string')\n\n\n for set in data_specific:\n if graph.nodeExists(set['set_num']) ==0:\n tm1=graph.addNode(set['set_num'], set['name'], r='0', g='0', b='0')\n tm1.addAttribute(atr1,\"set\")\n\n\n\n counter_test=1\n for set, part in data_parts.items():\n for key, part_list in part.items():\n interme =part_list['color']\n red=interme[0]+interme[1]\n green=interme[2]+interme[3]\n blue=interme[4]+interme[5]\n\n red_de=str(int(red,16))\n green_de=str(int(green,16))\n blue_de=str(int(blue,16))\n if graph.nodeExists(part_list['id'])==0:\n tm2=graph.addNode(part_list['id'], part_list['part_name'],r=red_de, g=green_de, b = blue_de)\n tm2.addAttribute(atr1,\"part\")\n\n\n counter_test+=1\n graph.addEdge(\"_\"+str(counter_test), set, part_list['id'], part_list['quantity'])\n\n\n\n f=open('bricks_graph.gexf','wb')\n my_gexf.write(f)\n\n\n return my_gexf.graphs[0]", "def LogMetaGraph(enabled=True):\n global option\n option['log_meta_graph'] = enabled", "def cli(yamlfile, **kwargs):\n print(RDFGenerator(yamlfile, **kwargs).serialize(**kwargs))", "def write_graph(graph, output_fp):\n output = output_fp + \".gexf\"\n print(f\"Graph written to {output}, visualise in gephi or similar\")\n nx.write_gexf(graph, output)", "def export_opml(request):\n feeds = Feed.active.all()\n site = get_current_site(request)\n return render(request, 'feedaggregator/export/opml.xml', {'feeds': feeds, 'site': site}, content_type=\"application/xhtml+xml\")", "def download(self, outputfile: str, outputformat: str):\n pass", "def export_to_file(self, path, graph_format):\n try:\n logging.info(\"Saving RDF data to \" + str(path))\n with open(path, \"wb\") as out_file:\n out_file.write(self.g.serialize(format=graph_format, encoding=\"UTF-8\"))\n except Exception as e:\n logging.error(\"Error while saving RDF results \"+str(e))", "def LogMetaGraph(graph_def):\n option = GetGlobalOptions()\n if option['log_meta_graph']: print(graph_def)", "def download(self,**attrs):\n\t\treturn super().download(**attrs)", "def generate_subgraph(format):\n\n # get business information\n directorypath = genpath+directory\n if os.path.isfile(directorypath):\n \n bizdata = pd.read_csv( directorypath, escapechar='\\\\')\n\n #create a directory of page-id and object-ids\n tempdf = bizdata.set_index('pageid')\n tempdf = tempdf['objectid']\n dictionary = tempdf.to_dict()\n\n uncgraph = pd.read_csv(inpath+graphfile, escapechar='\\\\')\n uncgraph = 
uncgraph.dropna()\n uncgraph['likee_object_id'] = uncgraph.apply(lambda x: dictionary.get(x['likee_page_id']), axis=1)\n cgraph = uncgraph.dropna()\n cgraph = cgraph[['liker_page_id', 'likee_page_id']]\n cgraph.columns = ['Source', 'Target']\n\n \n print_stats(cgraph)\n if format == 'networkx' :\n print \"[Generating a networkX graph...]\" \n cgraph.to_csv(genpath+subgraph+'.ntx', index=False, header=False, sep= ' ')\n else:\n print \"[Generating a csv graph...]\" \n cgraph.to_csv(genpath+subgraph+'.csv', index=False)\n\n\n else:\n print \"Either file is missing or is not readable\"", "def show_graph_with_learning(self, output_fmt='pdf', direction = 'BT', learning_color='blue'):\n from PsyNeuLink.Components.Mechanisms.ProcessingMechanisms.ObjectiveMechanism import ObjectiveMechanism\n from PsyNeuLink.Components.Mechanisms.AdaptiveMechanisms.LearningMechanisms.LearningMechanism import LearningMechanism\n from PsyNeuLink.Components.Projections.MappingProjection import MappingProjection\n\n import graphviz as gv\n\n system_graph = self.graph\n learning_graph=self.learningGraph\n \n # build graph and configure visualisation settings\n G = gv.Digraph(engine = \"dot\", \n node_attr = {'fontsize':'12', 'fontname': 'arial', 'shape':'oval'}, \n edge_attr = {'arrowhead':'halfopen', 'fontsize': '10', 'fontname': 'arial'},\n graph_attr = {\"rankdir\" : direction} )\n \n # work with system graph\n rcvrs = list(system_graph.keys())\n # loop through receivers\n for rcvr in rcvrs:\n rcvr_name = rcvr[0].name\n rcvr_label = rcvr_name\n\n # loop through senders\n sndrs = system_graph[rcvr]\n for sndr in sndrs:\n sndr_name = sndr[0].name\n sndr_label = sndr_name\n\n # find edge name\n projs = sndr[0].outputState.sendsToProjections\n for proj in projs:\n if proj.receiver.owner == rcvr[0]:\n edge_name = proj.name\n draw_node = not proj.has_learning_projection\n edge_label = edge_name\n #### CHANGE MADE HERE ###\n if draw_node:\n G.edge(sndr_label, rcvr_label, label = edge_label)\n else:\n G.node(sndr_label, shape=\"oval\")\n G.node(edge_label, shape=\"diamond\")\n G.node(rcvr_label, shape=\"oval\")\n G.edge(sndr_label, edge_label, arrowhead='none')\n G.edge(edge_label, rcvr_label)\n #### CHANGE MADE HERE ###\n \n rcvrs = list(learning_graph.keys())\n \n for rcvr in rcvrs:\n # if rcvr is projection\n if isinstance(rcvr, MappingProjection):\n # for each sndr of rcvr\n sndrs = learning_graph[rcvr]\n for sndr in sndrs:\n G.edge(sndr.name, rcvr.name)\n else:\n sndrs = learning_graph[rcvr]\n for sndr in sndrs:\n G.node(rcvr.name, color=learning_color)\n G.node(sndr.name, color=learning_color)\n G.edge(sndr.name, rcvr.name, color=learning_color)\n \n if output_fmt == 'pdf':\n G.view(self.name.replace(\" \", \"-\"), cleanup=True)\n elif output_fmt == 'jupyter':\n return G", "def gexf_graph():\n # you must replace these lines and supply your own graph\n gexf = Gexf(\"author\", \"title\")\n mygraph = gexf.addGraph(\"undirected\", \"static\", \"A web network\")\n atr_type = mygraph.addNodeAttribute('Type', type='string')\n atr_id = mygraph.addNodeAttribute('id', type='string')\n atr_label = mygraph.addNodeAttribute('label', type='string')\n atr_color_r = mygraph.addNodeAttribute('color_r', type='string', defaultValue='0')\n atr_color_g = mygraph.addNodeAttribute('color_g', type='string', defaultValue='0')\n atr_color_b = mygraph.addNodeAttribute('color_b', type='string', defaultValue='0')\n k = 0\n for i in range(min_parts()):\n tmp = mygraph.addNode(set_num[i], name[i], r=\"0\", g=\"0\", b=\"0\")\n 
tmp.addAttribute(atr_type, \"set\")\n tmp.addAttribute(atr_id, set_num[i])\n tmp.addAttribute(atr_label, name[i])\n for j in range(len(Parts[i][\"Parts\"])):\n if mygraph.nodeExists(Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"])==0:\n temp = mygraph.addNode((Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]), Parts[i][\"Parts\"][j][\"name\"], r=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][0:2], 16)), g=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][2:4], 16)), b=str(int(Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][4:6], 16)))\n temp.addAttribute(atr_type, \"part\")\n temp.addAttribute(atr_id, (Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]))\n temp.addAttribute(atr_label, Parts[i][\"Parts\"][j][\"name\"])\n temp.addAttribute(atr_color_r, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][0:2])\n temp.addAttribute(atr_color_g, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][2:4])\n temp.addAttribute(atr_color_b, Parts[i][\"Parts\"][j][\"color\"][\"rgb\"][4:6])\n mygraph.addEdge(str(k), set_num[i], (Parts[i][\"Parts\"][j][\"number\"]+\"_\"+Parts[i][\"Parts\"][j][\"color\"][\"rgb\"]), weight=Parts[i][\"Parts\"][j][\"quantity\"])\n k = k+1\n output_file = open(\"bricks_graph.gexf\", \"wb\")\n gexf.write(output_file)\n return -1", "def __generate_graph(self, graph_type, graph_content):\n\n gv_format = str(self.__arg_options['format'])\n gv_type = str(graph_type)\n gv_location = str(self.__config_content['location'])\n gv_content = dict(graph_content)\n\n try:\n graph = GraphGenerator()\n graph.set_graph_config(gv_format, gv_location)\n graph.generate_graph(gv_content, gv_type)\n except (TypeError, ValueError) as error:\n self.__LOGGER.error(error)", "def export_graph(graph, name_file, format_export):\n im_name = ('{}.' + format_export).format('./' + name_file)\n if (format_export == \"png\"):\n graph.write_png(im_name)\n elif (format_export == \"dot\"):\n graph.write_dot(im_name)\n else:\n raise LookupError", "def connect_and_write_gml(self, f):\n G = self.graph.copy()\n node_base_set = set([i[:-2] for i in list(G.nodes)])\n for node in node_base_set:\n G.add_edge(node + \"_b\", node + \"_e\")\n\n # networkx doesn't like writing non-string attributes to GML\n for u, v in G.edges:\n for key in list(G[u][v].keys()):\n G[u][v][key] = str(G[u][v][key])\n nx.readwrite.gml.write_gml(G, f)", "def show_custom_graph(self):\n pass", "def graphiql(request):\n del request\n graphiql_filepath = pathlib.Path(__file__).absolute().parent / \"graphiql.html\"\n with open(graphiql_filepath) as f:\n return django.http.response.HttpResponse(f.read())", "def getGraph(self, data):\n graphuri = self.getGraphURI(data)\n # TODO: consider using temporary disk storage to not overload server\n # ... or stop parsing graph by some other means\n g = Graph(identifier=graphuri)\n\n if data['filedata'] and data['filedata'] != NOT_CHANGED:\n # uploaded file ...\n fmt = guessRDFFileFormat(data['format'],\n data['filedata'].contentType,\n data['filedata'].filename)\n g.parse(StringIO(data['filedata'].data), format=fmt)\n elif data['downloadurl']:\n # TODO: would be nice to check graph for graphuri, but it is\n # already a bit late here... 
needs improvement\n fmt = guessRDFFileFormat(data['format'], '', '')\n g.parse(data['downloadurl'], format=fmt)\n return g", "def ExportMetaGraph(graph_def):\n option = GetGlobalOptions()\n if option['export_meta_graph']:\n if not os.path.exists(option['export_meta_graph']):\n try:\n os.makedirs(option['export_meta_graph'])\n except Exception:\n raise ValueError('The given prefix is invalid.')\n\n path = os.path.join(\n option['export_meta_graph'],\n graph_def.name + '.metatxt')\n\n with open(path, 'w') as f: f.write(str(graph_def))\n logging.info('Export meta graph into: {}'.format(path))", "def save_graph(self, filename, fileType):\n if fileType == \"GML Format\":\n nx.write_gml(self.graph, filename+\".gml\")\n if fileType == \"Adjacency list\":\n nx.write_adjlist(self.graph, filename+\".adjlist\")\n if fileType == \"YAML\":\n nx.write_yaml(self.graph, filename + \".yaml\")", "def __init__(self, db):\n self.mime_types = dict(\n [('html', 'text/html'), ('pretty-xml', 'application/rdf+xml'),\n ('turtle', 'text/turtle'), ('nt', 'text/plain'),\n ('json-ld', 'application/ld+json'),\n ('sparql', 'application/sparql-results+xml'),\n ('sparql-json', 'application/sparql-results+json')])\n self.backend = RDFBackend(db)", "def show_graph(self, output_fmt='pdf', direction = 'BT'):\n from PsyNeuLink.Components.Mechanisms.ProcessingMechanisms.ObjectiveMechanism import ObjectiveMechanism\n from PsyNeuLink.Components.Mechanisms.AdaptiveMechanisms.LearningMechanisms.LearningMechanism import LearningMechanism\n \n import graphviz as gv\n\n system_graph = self.graph\n learning_graph=self.learningGraph\n \n # build graph and configure visualisation settings\n G = gv.Digraph(engine = \"dot\", \n node_attr = {'fontsize':'12', 'fontname': 'arial', 'shape':'oval'}, \n edge_attr = {'arrowhead':'halfopen', 'fontsize': '10', 'fontname': 'arial'},\n graph_attr = {\"rankdir\" : direction} )\n \n # work with system graph\n rcvrs = list(system_graph.keys())\n # loop through receivers\n for rcvr in rcvrs:\n if isinstance(rcvr[0], ObjectiveMechanism) or isinstance(rcvr[0], LearningMechanism):\n continue\n rcvr_name = rcvr[0].name\n rcvr_shape = rcvr[0].variable.shape[1]\n rcvr_label = \" {} ({}) \".format(rcvr_name, rcvr_shape)\n \n # loop through senders\n sndrs = system_graph[rcvr]\n for sndr in sndrs:\n sndr_name = sndr[0].name\n sndr_shape = sndr[0].variable.shape[1]\n sndr_label = \" {} ({}) \".format(sndr_name, sndr_shape)\n \n # find edge name\n projs = sndr[0].outputState.sendsToProjections\n for proj in projs:\n if proj.receiver.owner == rcvr[0]:\n edge_name = proj.name\n edge_shape = proj.matrix.shape\n edge_label = \" {} {} \".format(edge_name, edge_shape)\n G.edge(sndr_label, rcvr_label, label = edge_label)\n \n if output_fmt == 'pdf':\n G.view(self.name.replace(\" \", \"-\"), cleanup=True)\n elif output_fmt == 'jupyter':\n return G", "def wrapGraph(self, query) :\n\t\tif self.graph :\n\t\t\treturn \" GRAPH <%s> { %s } \" % (self.graph, query)\n\t\telse :\n\t\t\treturn query", "def gpml2json(path_in, path_out, pathway_iri, wp_id, pathway_version, wd_sparql):\n\n dir_out = path.dirname(path_out)\n # example base_out: 'WP4542.json'\n base_out = path.basename(path_out)\n [stub_out, ext_out_with_dot] = path.splitext(base_out)\n\n gpml2pvjson_cmd = (\n f\"gpml2pvjson --id {pathway_iri} --pathway-version {pathway_version}\"\n )\n with open(path_in, \"r\") as f_in:\n with open(path_out, \"w\") as f_out:\n gpml2pvjson_ps = subprocess.Popen(\n shlex.split(gpml2pvjson_cmd), stdin=f_in, stdout=f_out, shell=False\n )\n 
gpml2pvjson_ps.communicate()[0]\n\n organism = None\n with open(path_out, \"r\") as json_f:\n pathway_data = json.load(json_f)\n pathway = pathway_data[\"pathway\"]\n organism = pathway[\"organism\"]\n entities_by_id = pathway_data[\"entitiesById\"]\n entities_with_valid_xrefs = list()\n for entity in entities_by_id.values():\n datasource_invalid = \"xrefDataSource\" in entity and (\n entity[\"xrefDataSource\"] in [\"undefined\"]\n or not entity[\"xrefDataSource\"]\n )\n xref_identifier_invalid = \"xrefIdentifier\" in entity and (\n entity[\"xrefIdentifier\"] in [\"undefined\"]\n or not entity[\"xrefIdentifier\"]\n )\n if datasource_invalid or xref_identifier_invalid:\n entity_id = entity[\"id\"]\n print(\n f\"Invalid xref datasource and/or identifier for {wp_id}, entity {entity_id}\"\n )\n # bridgedbjs fails when an identifier is something like 'undefined'.\n # Should it ignore datasources/identifiers it doesn't recognize\n # and just keep going?\n del entity[\"xrefDataSource\"]\n del entity[\"xrefIdentifier\"]\n else:\n entities_with_valid_xrefs.append(entity)\n with open(path_out, \"w\") as f_out:\n json.dump(pathway_data, f_out)\n\n if not organism:\n print(\"No organism. Can't call BridgeDb.\")\n elif len(entities_with_valid_xrefs) == 0:\n # TODO: bridgedbjs fails when no xrefs are present.\n # Update bridgedbjs to do this check:\n print(\"No xrefs to process.\")\n else:\n pre_bridgedb_json_f = f\"{dir_out}/{stub_out}.pre_bridgedb.json\"\n rename(path_out, pre_bridgedb_json_f)\n\n bridgedb_cmd = f\"\"\"bridgedb xrefs -f json \\\n -i '.entitiesById[].type' \"{organism}\" \\\n '.entitiesById[].xrefDataSource' \\\n '.entitiesById[].xrefIdentifier' \\\n ChEBI P683 Ensembl P594 \"Entrez Gene\" P351 HGNC P353 HMDB P2057 Wikidata\n \"\"\"\n with open(pre_bridgedb_json_f, \"r\") as f_in:\n with open(path_out, \"w\") as f_out:\n bridgedb_ps = subprocess.Popen(\n shlex.split(bridgedb_cmd), stdin=f_in, stdout=f_out, shell=False\n )\n bridgedb_ps.communicate()[0]\n\n no_wikidata_xrefs_by_bridgedb_key = dict()\n entity_ids_by_bridgedb_key = dict()\n with open(path_out, \"r\") as json_f:\n pathway_data = json.load(json_f)\n pathway = pathway_data[\"pathway\"]\n entities_by_id = pathway_data[\"entitiesById\"]\n for entity in entities_by_id.values():\n if (\n \"xrefIdentifier\" in entity\n and \"xrefDataSource\" in entity\n and entity[\"xrefDataSource\"] in BRIDGEDB2WD_PROPS\n and len(\n [\n entity_type\n for entity_type in entity[\"type\"]\n if entity_type.startswith(\"Wikidata:\")\n ]\n )\n == 0\n ):\n entity_id = entity[\"id\"]\n datasource = entity[\"xrefDataSource\"]\n xref_identifier = entity[\"xrefIdentifier\"]\n bridgedb_key = NON_ALPHANUMERIC_RE.sub(\n \"\", datasource + xref_identifier\n )\n no_wikidata_xrefs_by_bridgedb_key[bridgedb_key] = [\n datasource,\n xref_identifier,\n ]\n if bridgedb_key not in entity_ids_by_bridgedb_key:\n entity_ids_by_bridgedb_key[bridgedb_key] = [entity_id]\n else:\n entity_ids_by_bridgedb_key[bridgedb_key].append(entity_id)\n\n pathway_id_query = (\n '''\nSELECT ?item WHERE {\n?item wdt:P2410 \"'''\n + wp_id\n + \"\"\"\" .\nSERVICE wikibase:label { bd:serviceParam wikibase:language \"en\" }\n}\"\"\"\n )\n wd_pathway_id_result = wd_sparql.query(pathway_id_query)\n\n if len(wd_pathway_id_result[\"results\"][\"bindings\"]) == 0:\n print(f\"Pathway ID {wp_id} not found in Wikidata. 
Retrying.\")\n # retry once\n wd_pathway_id_result = wd_sparql.query(pathway_id_query)\n if len(wd_pathway_id_result[\"results\"][\"bindings\"]) == 0:\n # if it still doesn't work, skip it\n print(\n f\"Pathway ID {wp_id} still not found in Wikidata. Skipping conversion.\"\n )\n return False\n\n wikidata_pathway_iri = wd_pathway_id_result[\"results\"][\"bindings\"][0][\n \"item\"\n ][\"value\"]\n wikidata_pathway_identifier = wikidata_pathway_iri.replace(\n \"http://www.wikidata.org/entity/\", \"\"\n )\n\n # adding Wikidata IRI to sameAs property & ensuring no duplication\n if not \"sameAs\" in pathway:\n pathway[\"sameAs\"] = wikidata_pathway_identifier\n else:\n same_as = pathway[\"sameAs\"]\n if type(same_as) == str:\n pathway[\"sameAs\"] = list({wikidata_pathway_identifier, same_as})\n else:\n same_as.append(wikidata_pathway_identifier)\n pathway[\"sameAs\"] = list(set(same_as))\n\n headings = []\n queries = []\n for i, xref in enumerate(no_wikidata_xrefs_by_bridgedb_key.values()):\n [datasource, xref_identifier] = xref\n heading = \"?\" + NON_ALPHANUMERIC_RE.sub(\n \"\", datasource + xref_identifier\n )\n headings.append(heading)\n wd_prop = BRIDGEDB2WD_PROPS[datasource]\n queries.append(f'{heading} wdt:{wd_prop} \"{xref_identifier}\" .')\n\n # Here we chunk the headings and queries into paired batches and\n # make several smaller requests to WD. This is needed because some\n # of the GET requests become too large to send as a single request.\n\n batch_size = 10\n for [heading_batch, query_batch] in zip(\n grouper_it(batch_size, headings), grouper_it(batch_size, queries)\n ):\n headings_str = \" \".join(heading_batch)\n queries_str = (\n \"WHERE { \"\n + \" \".join(query_batch)\n + ' SERVICE wikibase:label { bd:serviceParam wikibase:language \"en\" }}'\n )\n xref_query = f\"SELECT {headings_str} {queries_str}\"\n xref_result = wd_sparql.query(xref_query)\n xref_query = f\"SELECT {headings_str} {queries_str}\"\n xref_result = wd_sparql.query(xref_query)\n\n bridgedb_keys = xref_result[\"head\"][\"vars\"]\n for binding in xref_result[\"results\"][\"bindings\"]:\n for bridgedb_key in bridgedb_keys:\n # TODO: is this check needed?\n if type(binding[bridgedb_key][\"value\"]) == list:\n raise Exception(\"Error: expected list and got string\")\n\n wd_xref_identifier = binding[bridgedb_key][\"value\"].replace(\n \"http://www.wikidata.org/entity/\", \"\"\n )\n for entity_id in entity_ids_by_bridgedb_key[bridgedb_key]:\n entities_by_id[entity_id][\"type\"].append(\n f\"Wikidata:{wd_xref_identifier}\"\n )\n\n pre_wd_json_f = f\"{dir_out}/{stub_out}.pre_wd.json\"\n rename(path_out, pre_wd_json_f)\n with open(path_out, \"w\") as f_out:\n json.dump(pathway_data, f_out)" ]
[ "0.5644568", "0.5576068", "0.52066654", "0.5030416", "0.49787995", "0.4950677", "0.4896912", "0.48899424", "0.48656175", "0.48630908", "0.48574403", "0.48385876", "0.47709388", "0.47572508", "0.47426155", "0.47091573", "0.46963683", "0.46870074", "0.46818873", "0.46762022", "0.4673896", "0.46734902", "0.46678695", "0.46639162", "0.46167538", "0.46100268", "0.46004423", "0.45975113", "0.45945776", "0.4575607" ]
0.62148756
0
Tests whether NAPI denies manual IPv4 creation in a full network. Refactored to allow creating the IP.
def test_try_create_ip_in_full_network(self):

    name_file = 'api_ip/tests/sanity/ipv4/json/post/ipv4_10_0_4_1_net_8.json'
    response = self.client.post(
        '/api/v3/ipv4/',
        data=json.dumps(self.load_json_file(name_file)),
        content_type='application/json')

    self.compare_status(201, response.status_code)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_try_create_auto_ip(self):\n\n name_file = 'api_ip/tests/sanity/ipv4/json/post/ipv4_auto_net_free.json'\n\n # Does get request\n response = self.client.post(\n '/api/v3/ipv4/',\n data=json.dumps(self.load_json_file(name_file)),\n content_type='application/json')\n\n self.compare_status(201, response.status_code)\n\n url = prepare_url('/api/v3/ipv4/%s/' % response.data[0]['id'],\n fields=['ip_formated'])\n response = self.client.get(\n url,\n content_type='application/json')\n\n self.compare_status(200, response.status_code)\n self.compare_values('10.0.1.2', response.data['ips'][0]['ip_formated'])", "def OSSupportsIPv4(self) -> bool:", "def test_ipv4_in_net(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n assert test_ip.in_network(\"192.168.178.0/24\")\n assert test_ip.in_network(\"192.168.178.0/29\")\n \n test_ip = ip_address.IPAddress(\"192.168.178.4/2\", force_v4=True)\n assert test_ip.in_network(\"192.0.0.0/2\")\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n assert test_ip.in_network(\"10.0.11.0/4\") == False\n assert test_ip.in_network(\"192.169.178.0/24\") == False\n \n \n test_ip = ip_address.IPAddress(\"192.168.67.3\")\n assert test_ip.in_network(\"192.168.0.0/16\")", "def SupportsIPv4(self) -> bool:", "def test_try_create_invalid_ip(self):\n\n name_file = 'api_ip/tests/sanity/ipv4/json/post/ipv4_10_0_0_430_net_5.json'\n response = self.client.post(\n '/api/v3/ipv4/',\n data=json.dumps(self.load_json_file(name_file)),\n content_type='application/json')\n\n self.compare_status(400, response.status_code)\n self.compare_values(\n 'Error save new IP.: 10.0.0.430',\n response.data['detail'])", "def test_ipv4_in_net_internal_v6(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n assert test_ip.in_network(\"192.168.178.0/24\")\n assert test_ip.in_network(\"192.168.178.0/29\")\n \n test_ip = ip_address.IPAddress(\"192.168.178.4/2\")\n assert test_ip.in_network(\"192.0.0.0/2\")\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n assert test_ip.in_network(\"10.0.11.0/4\") == False\n assert test_ip.in_network(\"192.169.178.0/24\") == False\n \n \n test_ip = ip_address.IPAddress(\"192.168.67.3\")\n assert test_ip.in_network(\"192.168.0.0/16\")", "def test_add_autoassigned_pool_ipv4(self):\n with DockerHost('host', dind=False) as host:\n # Test that auto-assiging IPv4 addresses gives what we expect\n workloads = self._setup_env(host, count=2,\n ip=self.DEFAULT_IPV4_POOL)\n\n workloads[0].assert_can_ping(\"192.168.0.1\", retries=3)\n workloads[1].assert_can_ping(\"192.168.0.0\", retries=3)", "def test_add_autoassigned_ipv4(self):\n with DockerHost('host', dind=False) as host:\n # Test that auto-assiging IPv4 addresses gives what we expect\n workloads = self._setup_env(host, count=2, ip=\"ipv4\")\n\n workloads[0].assert_can_ping(\"192.168.0.1\", retries=3)\n workloads[1].assert_can_ping(\"192.168.0.0\", retries=3)\n\n host.calicoctl(\"container remove {0}\".format(\"workload0\"))\n host.calicoctl(\"container remove {0}\".format(\"workload1\"))\n\n host.remove_workloads()\n\n # Test that recreating returns the next two IPs (IPs are not\n # reassigned automatically unless we have run out of IPs).\n workloads = self._setup_env(host, count=2, ip=\"ipv4\")\n\n workloads[0].assert_can_ping(\"192.168.0.3\", retries=3)\n workloads[1].assert_can_ping(\"192.168.0.2\", retries=3)", "def test_ip4_cidr_syntax_internal_v6(self):\n \n test_ip = ip_address.IPAddress(\"192.168.0.1/24\")\n \n assert test_ip.addr == [0, 0, 0, 0, 0, 0, 0, 0, 
0, 0, 0xff, 0xff, 192, 168, 0, 1]\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1/16\") \n assert test_ip.addr == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1/8\")\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1\")\n assert test_ip.subnet == []", "def test_try_create_ip_associating_to_equipment(self):\n\n name_file = 'api_ip/tests/sanity/ipv4/json/post/ipv4_10_0_0_99_net_5_eqpt_1.json'\n response = self.client.post(\n '/api/v3/ipv4/',\n data=json.dumps(self.load_json_file(name_file)),\n content_type='application/json')\n\n self.compare_status(201, response.status_code)\n\n url = prepare_url('/api/v3/ipv4/%s/' % response.data[0]['id'],\n fields=['ip_formated'])\n response = self.client.get(\n url,\n content_type='application/json')\n\n self.compare_status(200, response.status_code)\n self.compare_values('10.0.0.99',\n response.data['ips'][0]['ip_formated'])", "def test_ipv4_validation_success():\n assert is_ipv4('8.8.8.8')", "def test_try_create_out_of_range_ip_in_network(self):\n\n name_file = 'api_ip/tests/sanity/ipv4/json/post/out_of_range_ipv4_172_0_0_5_net_5.json'\n response = self.client.post(\n '/api/v3/ipv4/',\n data=json.dumps(self.load_json_file(name_file)),\n content_type='application/json')\n\n self.compare_status(400, response.status_code)\n self.compare_values(\n 'Ip 172.0.0.5 not available for network 5.',\n response.data['detail'])", "def test_ipam_ip_addresses_create(self):\n pass", "def test_target_resembles_ip(self):\n for fqdn in ('10.234.30.253', '128.193.0.3', 'fe80::e1c9:1:228d:d8'):\n with self.assertRaises(ValidationError):\n self.create_ptr(ip_str='128.193.0.2', fqdn=fqdn,\n ip_type='4')", "def test_ipv4_in_range(self):\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n \n assert test_ip.in_range(\"191.167.0.0\",\"193.169.0.0\")\n assert test_ip.in_range(\"192.167.0.0\",\"192.169.0.0\")\n assert test_ip.in_range(\"192.168.0.0\",\"192.168.255.0\")\n assert test_ip.in_range(\"192.168.178.3\",\"192.168.178.5\")\n assert test_ip.in_range(\"192.168.178.4\",\"192.168.178.4\")\n \n assert test_ip.in_range(\"192.168.179.1\",\"192.168.179.3\") == False\n assert test_ip.in_range(\"10.168.179.1\",\"191.168.179.3\") == False", "def test_ipv4_validation_failure():\n with pytest.raises(socket.error):\n is_ipv4('256.8.8.8')", "def test_IPv4_to_IP(self):\n self.assertEqual(helpers.IPv4_to_IP('0.0.0.0'), '00000000000000000000000000000000')\n self.assertEqual(\n helpers.IPv4s_to_IPs(['0.0.0.0', '136.154.62.169']),\n [\n '00000000000000000000000000000000',\n '10001000100110100011111010101001'\n ]\n )", "def test_same_ip(self):\n self.create_ptr(\n ip_str='128.193.0.2', ip_type='4', fqdn='foo1.oregonstate.edu')\n\n with self.assertRaises(ValidationError):\n self.create_ptr(\n ip_str='128.193.0.2', ip_type='4', fqdn='foo2.oregonstate.edu')", "def test_create_host_subnet(self):\n pass", "def test_target_existence(self):\n self.create_ptr(\n ip_str='128.193.0.2', fqdn='nonexistent.oregonstate.edu',\n ip_type='4')", "def validateIP():\n try:\n s = socket.inet_aton(args.target)\n except socket.error:\n print(\"\")\n print(f\"{bad_cmd} Bad IP address\")\n print(\"\")\n 
sys.exit()", "def test_ipam_prefixes_available_ips_create(self):\n pass", "def test_IP_to_IPv4(self):\n self.assertEqual(helpers.IP_to_IPv4('00000000000000000000000000000000'), '0.0.0.0')\n self.assertEqual(\n helpers.IPs_to_IPv4s(\n [\n '00000000000000000000000000000000',\n '10001000100110100011111010101001'\n ]\n ),\n ['0.0.0.0', '136.154.62.169']\n )", "def test_07_associate_public_ip(self):\n # Validate the following\n # 1. Create a project\n # 2. Add some public Ips to the project\n # 3. Verify public IP assigned can only used to create PF/LB rules\n # inside project\n\n networks = Network.list(\n self.apiclient,\n projectid=self.project.id,\n listall=True\n )\n self.assertEqual(\n isinstance(networks, list),\n True,\n \"Check list networks response returns a valid response\"\n )\n self.assertNotEqual(\n len(networks),\n 0,\n \"Check list networks response returns a valid network\"\n )\n network = networks[0]\n self.debug(\"Associating public IP for project: %s\" % self.project.id)\n public_ip = PublicIPAddress.create(\n self.apiclient,\n zoneid=self.virtual_machine.zoneid,\n services=self.services[\"server\"],\n networkid=network.id,\n projectid=self.project.id\n )\n self.cleanup.append(public_ip)\n\n #Create NAT rule\n self.debug(\n \"Creating a NAT rule within project, VM ID: %s\" %\n self.virtual_machine.id)\n nat_rule = NATRule.create(\n self.apiclient,\n self.virtual_machine,\n self.services[\"natrule\"],\n public_ip.ipaddress.id,\n projectid=self.project.id\n )\n self.debug(\"created a NAT rule with ID: %s\" % nat_rule.id)\n nat_rule_response = NATRule.list(\n self.apiclient,\n id=nat_rule.id\n )\n self.assertEqual(\n isinstance(nat_rule_response, list),\n True,\n \"Check list response returns a valid list\"\n )\n self.assertNotEqual(\n len(nat_rule_response),\n 0,\n \"Check Port Forwarding Rule is created\"\n )\n self.assertEqual(\n nat_rule_response[0].id,\n nat_rule.id,\n \"Check Correct Port forwarding Rule is returned\"\n )\n\n #Create Load Balancer rule and assign VMs to rule\n self.debug(\"Created LB rule for public IP: %s\" %\n public_ip.ipaddress)\n lb_rule = LoadBalancerRule.create(\n self.apiclient,\n self.services[\"lbrule\"],\n public_ip.ipaddress.id,\n projectid=self.project.id\n )\n self.debug(\"Assigning VM: %s to LB rule: %s\" % (\n self.virtual_machine.name,\n lb_rule.id\n ))\n lb_rule.assign(self.apiclient, [self.virtual_machine])\n\n lb_rules = list_lb_rules(\n self.apiclient,\n id=lb_rule.id\n )\n self.assertEqual(\n isinstance(lb_rules, list),\n True,\n \"Check list response returns a valid list\"\n )\n #verify listLoadBalancerRules lists the added load balancing rule\n self.assertNotEqual(\n len(lb_rules),\n 0,\n \"Check Load Balancer Rule in its List\"\n )\n self.assertEqual(\n lb_rules[0].id,\n lb_rule.id,\n \"Check List Load Balancer Rules returns valid Rule\"\n )\n\n #Create Firewall rule with configurations from settings file\n fw_rule = FireWallRule.create(\n self.apiclient,\n ipaddressid=public_ip.ipaddress.id,\n protocol='TCP',\n cidrlist=[self.services[\"fw_rule\"][\"cidr\"]],\n startport=self.services[\"fw_rule\"][\"startport\"],\n endport=self.services[\"fw_rule\"][\"endport\"],\n projectid=self.project.id\n )\n self.debug(\"Created firewall rule: %s\" % fw_rule.id)\n\n # After Router start, FW rule should be in Active state\n fw_rules = FireWallRule.list(\n self.apiclient,\n id=fw_rule.id,\n )\n self.assertEqual(\n isinstance(fw_rules, list),\n True,\n \"Check for list FW rules response return valid data\"\n )\n\n self.assertEqual(\n 
fw_rules[0].state,\n 'Active',\n \"Check list load balancing rules\"\n )\n self.assertEqual(\n fw_rules[0].startport,\n self.services[\"fw_rule\"][\"startport\"],\n \"Check start port of firewall rule\"\n )\n\n self.assertEqual(\n fw_rules[0].endport,\n self.services[\"fw_rule\"][\"endport\"],\n \"Check end port of firewall rule\"\n )\n\n self.debug(\"Deploying VM for account: %s\" % self.account.name)\n virtual_machine_1 = VirtualMachine.create(\n self.apiclient,\n self.services[\"server\"],\n templateid=self.template.id,\n accountid=self.account.name,\n domainid=self.account.domainid,\n serviceofferingid=self.service_offering.id,\n )\n self.cleanup.append(virtual_machine_1)\n\n self.debug(\"VM state after deploy: %s\" % virtual_machine_1.state)\n # Verify VM state\n self.assertEqual(\n virtual_machine_1.state,\n 'Running',\n \"Check VM state is Running or not\"\n )\n\n self.debug(\"Creating NAT rule for VM (ID: %s) outside project\" %\n virtual_machine_1.id)\n with self.assertRaises(Exception):\n NATRule.create(\n self.apiclient,\n virtual_machine_1,\n self.services[\"natrule\"],\n public_ip.ipaddress.id,\n )\n\n self.debug(\"Creating LB rule for public IP: %s outside project\" %\n public_ip.ipaddress)\n with self.assertRaises(Exception):\n LoadBalancerRule.create(\n self.apiclient,\n self.services[\"lbrule\"],\n public_ip.ipaddress.id,\n accountid=self.account.name\n )\n return", "def test_check_ip_on_whitelist_false(self):\n\n ip_name = 'f11.my.com'\n\n result = check_ip_on_whitelist(ip_name, self.pattern_ip)\n\n self.assertFalse(result)", "def test_IPv4s_to_valid_CIDR(self):\n self.assertEqual(\n helpers.IPRange_to_valid_CIDR('192.168.0.1', '192.168.0.1'),\n '192.168.0.1/32'\n )", "def isIpv4AddrWithNetmask(string):\n return (True)", "def is_valid_ip(ip):\n ...", "def test_port_create_with_no_fixed_ips_no_ipam_on_routed_network(self):\n with self.network() as network:\n segment = self._test_create_segment(\n network_id=network['network']['id'],\n physical_network='physnet',\n network_type=constants.TYPE_VLAN)\n with self.subnet(network=network,\n segment_id=segment['segment']['id']):\n pass\n\n # Create an unbound port requesting no IP addresses\n response = self._create_port_and_show(network, fixed_ips=[])\n self.assertEqual([], response['port']['fixed_ips'])\n self.assertEqual(ipalloc_apidef.IP_ALLOCATION_NONE,\n response['port'][ipalloc_apidef.IP_ALLOCATION])", "def test_ip_addresses_exists():\n load_ips()\n validate_names()" ]
[ "0.72138643", "0.7143597", "0.7103163", "0.69434977", "0.6816763", "0.6809881", "0.6664153", "0.65963924", "0.6582416", "0.6578206", "0.65741384", "0.6565793", "0.65070087", "0.64682233", "0.6466719", "0.6372231", "0.6264613", "0.62450415", "0.6238998", "0.61737835", "0.61651254", "0.61551386", "0.6140033", "0.6120405", "0.6082967", "0.60798943", "0.6076065", "0.6052339", "0.60294086", "0.6023387" ]
0.7489132
0
Tests whether NAPI denies manual creation of an IPv4 address outside the network range.
def test_try_create_out_of_range_ip_in_network(self):

    name_file = 'api_ip/tests/sanity/ipv4/json/post/out_of_range_ipv4_172_0_0_5_net_5.json'
    response = self.client.post(
        '/api/v3/ipv4/',
        data=json.dumps(self.load_json_file(name_file)),
        content_type='application/json')

    self.compare_status(400, response.status_code)
    self.compare_values(
        'Ip 172.0.0.5 not available for network 5.',
        response.data['detail'])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_ipv4_in_range(self):\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n \n assert test_ip.in_range(\"191.167.0.0\",\"193.169.0.0\")\n assert test_ip.in_range(\"192.167.0.0\",\"192.169.0.0\")\n assert test_ip.in_range(\"192.168.0.0\",\"192.168.255.0\")\n assert test_ip.in_range(\"192.168.178.3\",\"192.168.178.5\")\n assert test_ip.in_range(\"192.168.178.4\",\"192.168.178.4\")\n \n assert test_ip.in_range(\"192.168.179.1\",\"192.168.179.3\") == False\n assert test_ip.in_range(\"10.168.179.1\",\"191.168.179.3\") == False", "def test_ipv4_in_range_internal_v6(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n \n assert test_ip.in_range(\"191.167.0.0\",\"193.169.0.0\")\n assert test_ip.in_range(\"192.167.0.0\",\"192.169.0.0\")\n assert test_ip.in_range(\"192.168.0.0\",\"192.168.255.0\")\n assert test_ip.in_range(\"192.168.178.3\",\"192.168.178.5\")\n assert test_ip.in_range(\"192.168.178.4\",\"192.168.178.4\")\n \n assert test_ip.in_range(\"192.168.179.1\",\"192.168.179.3\") == False\n assert test_ip.in_range(\"10.168.179.1\",\"191.168.179.3\") == False", "def OSSupportsIPv4(self) -> bool:", "def test_ipv4_in_net(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n assert test_ip.in_network(\"192.168.178.0/24\")\n assert test_ip.in_network(\"192.168.178.0/29\")\n \n test_ip = ip_address.IPAddress(\"192.168.178.4/2\", force_v4=True)\n assert test_ip.in_network(\"192.0.0.0/2\")\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\", force_v4=True)\n assert test_ip.in_network(\"10.0.11.0/4\") == False\n assert test_ip.in_network(\"192.169.178.0/24\") == False\n \n \n test_ip = ip_address.IPAddress(\"192.168.67.3\")\n assert test_ip.in_network(\"192.168.0.0/16\")", "def test_IPv4s_to_valid_CIDR(self):\n self.assertEqual(\n helpers.IPRange_to_valid_CIDR('192.168.0.1', '192.168.0.1'),\n '192.168.0.1/32'\n )", "def SupportsIPv4(self) -> bool:", "def test_ipv4_in_net_internal_v6(self):\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n assert test_ip.in_network(\"192.168.178.0/24\")\n assert test_ip.in_network(\"192.168.178.0/29\")\n \n test_ip = ip_address.IPAddress(\"192.168.178.4/2\")\n assert test_ip.in_network(\"192.0.0.0/2\")\n\n test_ip = ip_address.IPAddress(\"192.168.178.4\")\n assert test_ip.in_network(\"10.0.11.0/4\") == False\n assert test_ip.in_network(\"192.169.178.0/24\") == False\n \n \n test_ip = ip_address.IPAddress(\"192.168.67.3\")\n assert test_ip.in_network(\"192.168.0.0/16\")", "def test_ipv4_validation_failure():\n with pytest.raises(socket.error):\n is_ipv4('256.8.8.8')", "def test_ptr_in_dynamic_range(self):\n self.create_network_range(\n network_str='128.193.1.0/24', start_str='128.193.1.2',\n end_str='128.193.1.100', range_type='dy')\n\n with self.assertRaises(ValidationError):\n self.create_ptr(\n ip_str='128.193.1.2', ip_type='4', fqdn='foo.oregonstate.edu')", "def test_try_create_invalid_ip(self):\n\n name_file = 'api_ip/tests/sanity/ipv4/json/post/ipv4_10_0_0_430_net_5.json'\n response = self.client.post(\n '/api/v3/ipv4/',\n data=json.dumps(self.load_json_file(name_file)),\n content_type='application/json')\n\n self.compare_status(400, response.status_code)\n self.compare_values(\n 'Error save new IP.: 10.0.0.430',\n response.data['detail'])", "def test_try_create_ip_in_full_network(self):\n\n name_file = 'api_ip/tests/sanity/ipv4/json/post/ipv4_10_0_4_1_net_8.json'\n response = self.client.post(\n '/api/v3/ipv4/',\n data=json.dumps(self.load_json_file(name_file)),\n content_type='application/json')\n\n 
self.compare_status(201, response.status_code)", "def validateIP():\n try:\n s = socket.inet_aton(args.target)\n except socket.error:\n print(\"\")\n print(f\"{bad_cmd} Bad IP address\")\n print(\"\")\n sys.exit()", "def test_ip4_cidr_syntax_internal_v6(self):\n \n test_ip = ip_address.IPAddress(\"192.168.0.1/24\")\n \n assert test_ip.addr == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0xff, 0xff, 192, 168, 0, 1]\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1/16\") \n assert test_ip.addr == [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1/8\")\n assert test_ip.subnet == [0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x0, 0x0, 0]\n \n test_ip = ip_address.IPAddress(\"127.0.0.1\")\n assert test_ip.subnet == []", "def test_ipv4_validation_success():\n assert is_ipv4('8.8.8.8')", "def test_create_ports_in_vsd_managed_l2domain_dhcp_unmanaged_neg(self):\n # Given I have a VSD-L2-Unmanaged subnet\n vsd_l2_domain_template, vsd_l2_domain = \\\n self._given_vsd_l2_dhcp_disabled_domain()\n\n # create Openstack IPv4 subnet on Openstack based on VSD l2domain\n net_name = data_utils.rand_name('network-')\n network = self.create_network(network_name=net_name)\n ipv4_subnet = self.create_subnet(\n network,\n cidr=self.cidr4, mask_bits=self.mask_bits4_unsliced,\n gateway=None, enable_dhcp=False,\n nuagenet=vsd_l2_domain.id, net_partition=Topology.def_netpartition)\n self.assertEqual(\n str(next(IPNetwork(self.cidr4).subnet(self.mask_bits4_unsliced))),\n ipv4_subnet['cidr'])\n\n # shall not create a port with fixed-ip IPv6 in ipv4 subnet\n port_args = {'fixed_ips':\n [{'subnet_id': ipv4_subnet['id'],\n 'ip_address': IPAddress(self.cidr6.first + 21)}]}\n self.assertRaisesRegex(\n tempest_exceptions.BadRequest,\n \"IP address %s is not a valid IP for the specified subnet\" %\n (IPAddress(self.cidr6.first + 21)),\n self.create_port,\n network,\n **port_args)\n\n # create Openstack IPv6 subnet\n ipv6_subnet = self.create_subnet(\n network,\n ip_version=6,\n cidr=self.cidr6, mask_bits=self.cidr6.prefixlen,\n gateway=vsd_l2_domain_template.ipv6_gateway, enable_dhcp=False,\n nuagenet=vsd_l2_domain.id, net_partition=Topology.def_netpartition)\n\n # shall not create port with IP already in use\n port_args = {'fixed_ips': [{'subnet_id': ipv4_subnet['id'],\n 'ip_address':\n IPAddress(self.cidr4.first + 10)},\n {'subnet_id': ipv6_subnet['id'],\n 'ip_address':\n IPAddress(self.cidr6.first + 10)}]}\n\n valid_port = self.create_port(network, **port_args)\n self.assertIsNotNone(valid_port)\n\n port_args = {'fixed_ips': [{'subnet_id': ipv4_subnet['id'],\n 'ip_address': IPAddress(\n self.cidr4.first + 11)},\n {'subnet_id': ipv6_subnet['id'],\n 'ip_address': IPAddress(\n self.cidr6.first + 10)}]}\n\n self.assertRaisesRegex(\n tempest_exceptions.Conflict,\n 'IP address {} already allocated in '\n 'subnet {}'.format(IPAddress(self.cidr6.first + 10),\n ipv6_subnet['id']),\n self.create_port,\n network,\n **port_args)\n\n # shall not create port with fixed ip in outside cidr\n port_args = {'fixed_ips': [{'subnet_id': ipv4_subnet['id'],\n 'ip_address': IPAddress(\n self.cidr4.first + 12)},\n {'subnet_id': ipv6_subnet['id'],\n 'ip_address': IPAddress(\n self.cidr6.first - 20)}]}\n self.assertRaisesRegex(\n 
tempest_exceptions.BadRequest,\n \"IP address %s is not a valid IP for the specified subnet\" %\n (IPAddress(self.cidr6.first - 20)),\n self.create_port,\n network,\n **port_args)", "def test_try_create_auto_ip(self):\n\n name_file = 'api_ip/tests/sanity/ipv4/json/post/ipv4_auto_net_free.json'\n\n # Does get request\n response = self.client.post(\n '/api/v3/ipv4/',\n data=json.dumps(self.load_json_file(name_file)),\n content_type='application/json')\n\n self.compare_status(201, response.status_code)\n\n url = prepare_url('/api/v3/ipv4/%s/' % response.data[0]['id'],\n fields=['ip_formated'])\n response = self.client.get(\n url,\n content_type='application/json')\n\n self.compare_status(200, response.status_code)\n self.compare_values('10.0.1.2', response.data['ips'][0]['ip_formated'])", "def test_add_autoassigned_pool_ipv4(self):\n with DockerHost('host', dind=False) as host:\n # Test that auto-assiging IPv4 addresses gives what we expect\n workloads = self._setup_env(host, count=2,\n ip=self.DEFAULT_IPV4_POOL)\n\n workloads[0].assert_can_ping(\"192.168.0.1\", retries=3)\n workloads[1].assert_can_ping(\"192.168.0.0\", retries=3)", "def test_target_resembles_ip(self):\n for fqdn in ('10.234.30.253', '128.193.0.3', 'fe80::e1c9:1:228d:d8'):\n with self.assertRaises(ValidationError):\n self.create_ptr(ip_str='128.193.0.2', fqdn=fqdn,\n ip_type='4')", "def test_port_create_with_no_fixed_ips_no_ipam_on_routed_network(self):\n with self.network() as network:\n segment = self._test_create_segment(\n network_id=network['network']['id'],\n physical_network='physnet',\n network_type=constants.TYPE_VLAN)\n with self.subnet(network=network,\n segment_id=segment['segment']['id']):\n pass\n\n # Create an unbound port requesting no IP addresses\n response = self._create_port_and_show(network, fixed_ips=[])\n self.assertEqual([], response['port']['fixed_ips'])\n self.assertEqual(ipalloc_apidef.IP_ALLOCATION_NONE,\n response['port'][ipalloc_apidef.IP_ALLOCATION])", "def is_reserved(ip):\n if ip_between(ip, \"0.0.0.0\", \"0.255.255.255\"):\n return True\n elif ip_between(ip, \"10.0.0.0\", \"10.255.255.255\"):\n return True\n elif ip_between(ip, \"100.64.0.0\", \"100.127.255.255\"):\n return True\n elif ip_between(ip, \"127.0.0.0\", \"127.255.255.255\"):\n return True\n elif ip_between(ip, \"169.254.0.0\", \"169.254.255.255\"):\n return True\n elif ip_between(ip, \"172.16.0.0\", \"172.31.255.255\"):\n return True\n elif ip_between(ip, \"192.0.0.0\", \"192.0.0.255\"):\n return True\n elif ip_between(ip, \"192.0.2.0\", \"192.0.2.255\"):\n return True\n elif ip_between(ip, \"192.88.99.0\", \"192.88.99.255\"):\n return True\n elif ip_between(ip, \"192.168.0.0\", \"192.168.255.255\"):\n return True\n elif ip_between(ip, \"198.18.0.0\", \"198.19.255.255\"):\n return True\n elif ip_between(ip, \"198.51.100.0\", \"198.51.100.255\"):\n return True\n elif ip_between(ip, \"203.0.113.0\", \"203.0.113.255\"):\n return True\n elif ip_between(ip, \"224.0.0.0\", \"255.255.255.255\"):\n return True\n else:\n return False", "def testSPFInvalidIPv4Range(self):\n spf_record = \"v=spf1 ip4:78.46.96.236/99 ~all\"\n domain = \"surftown.dk\"\n self.assertRaises(checkdmarc.SPFSyntaxError,\n checkdmarc.parse_spf_record, spf_record, domain)", "def test_ipam_ip_addresses_create(self):\n pass", "def test_check_ip_on_whitelist_false(self):\n\n ip_name = 'f11.my.com'\n\n result = check_ip_on_whitelist(ip_name, self.pattern_ip)\n\n self.assertFalse(result)", "def isIpv4AddrWithNetmask(string):\n return (True)", "def 
test_create_port_in_vsd_managed_l2domain_dhcp_unmanaged_neg(self):\n # Given I have a VSD-L2-Unmanaged subnet\n _, vsd_l2_domain = self._given_vsd_l2_dhcp_disabled_domain()\n\n # create Openstack IPv4 subnet on Openstack based on VSD l2domain\n net_name = data_utils.rand_name('network-')\n network = self.create_network(network_name=net_name)\n ipv4_subnet = self.create_subnet(\n network,\n cidr=self.cidr4, mask_bits=self.mask_bits4_unsliced,\n gateway=None, enable_dhcp=False,\n nuagenet=vsd_l2_domain.id, net_partition=Topology.def_netpartition)\n\n ipv6_subnet = self.create_subnet(\n network,\n ip_version=6, cidr=self.cidr6, mask_bits=self.mask_bits6,\n gateway=self.gateway6, enable_dhcp=False,\n nuagenet=vsd_l2_domain.id, net_partition=Topology.def_netpartition)\n\n # noinspection PyPep8\n invalid_ipv6 = [\n ('::1', MSG_INVALID_IP_ADDRESS_FOR_SUBNET),\n # Loopback\n ('FE80::1', MSG_INVALID_IP_ADDRESS_FOR_SUBNET),\n # Link local address\n (\"FF00:5f74:c4a5:b82e:ffff:ffff:ffff:ffff\",\n MSG_INVALID_IP_ADDRESS_FOR_SUBNET),\n # multicast\n ('FF00::1', MSG_INVALID_IP_ADDRESS_FOR_SUBNET),\n # multicast address\n ('::1', MSG_INVALID_IP_ADDRESS_FOR_SUBNET),\n # not specified address\n ('::', MSG_INVALID_IP_ADDRESS_FOR_SUBNET),\n # empty address\n (\"2001:ffff:ffff:ffff:ffff:ffff:ffff:ffff\",\n MSG_INVALID_IP_ADDRESS_FOR_SUBNET),\n # valid address, not in subnet\n ('', MSG_INVALID_INPUT_FOR_FIXED_IPS),\n # empty string\n (\"2001:5f74:c4a5:b82e:ffff:ffff:ffff:ffff:ffff\",\n MSG_INVALID_INPUT_FOR_FIXED_IPS),\n # invalid address, too many segments\n (\"2001:5f74:c4a5:b82e:ffff:ffff:ffff\",\n MSG_INVALID_INPUT_FOR_FIXED_IPS),\n # invalid address, seven segments\n (\"2001;5f74.c4a5.b82e:ffff:ffff:ffff\",\n MSG_INVALID_INPUT_FOR_FIXED_IPS),\n # invalid address, wrong characters\n (\"2001:5f74:c4a5:b82e:100.12.13.1\",\n MSG_INVALID_INPUT_FOR_FIXED_IPS),\n # invalid format: must have :: between hex and decimal part.\n ]\n\n for ipv6, msg in invalid_ipv6:\n port_args = {'fixed_ips': [{'subnet_id': ipv4_subnet['id'],\n 'ip_address':\n IPAddress(self.cidr4.first + 1)},\n {'subnet_id': ipv6_subnet['id'],\n 'ip_address': ipv6}]}\n self.assertRaisesRegex(\n tempest_exceptions.BadRequest,\n msg % ipv6, self.create_port, network, **port_args)", "def test_IPv4_to_IP(self):\n self.assertEqual(helpers.IPv4_to_IP('0.0.0.0'), '00000000000000000000000000000000')\n self.assertEqual(\n helpers.IPv4s_to_IPs(['0.0.0.0', '136.154.62.169']),\n [\n '00000000000000000000000000000000',\n '10001000100110100011111010101001'\n ]\n )", "def test_static_nat_on_ip_from_non_src_nat_ip_range(self):\n\n # Validate the following:\n # 1. Create a new public IP range and dedicate to a account\n # 2. Acquire a IP from new public range\n # 3. Enable static NAT on acquired IP from new range\n # 4. Create a firewall rule to open up the port\n # 5. 
Test SSH works to the VM\n\n self.services[\"extrapubliciprange\"][\"zoneid\"] = self.services[\"zoneid\"]\n self.public_ip_range = PublicIpRange.create(\n self.apiclient,\n self.services[\"extrapubliciprange\"]\n )\n self.cleanup.append(self.public_ip_range)\n logger.debug(\"Dedicating Public IP range to the account\");\n dedicate_public_ip_range_response = PublicIpRange.dedicate(\n self.apiclient,\n self.public_ip_range.vlan.id,\n account=self.account.name,\n domainid=self.account.domainid\n )\n ip_address = PublicIPAddress.create(\n self.apiclient,\n self.account.name,\n self.zone.id,\n self.account.domainid,\n self.services[\"virtual_machine\"]\n )\n self.cleanup.append(ip_address)\n # Check if VM is in Running state before creating NAT and firewall rules\n vm_response = VirtualMachine.list(\n self.apiclient,\n id=self.virtual_machine.id\n )\n\n self.assertEqual(\n isinstance(vm_response, list),\n True,\n \"Check list VM returns a valid list\"\n )\n\n self.assertNotEqual(\n len(vm_response),\n 0,\n \"Check Port Forwarding Rule is created\"\n )\n self.assertEqual(\n vm_response[0].state,\n 'Running',\n \"VM state should be Running before creating a NAT rule.\"\n )\n\n # Open up firewall port for SSH\n fwr = FireWallRule.create(\n self.apiclient,\n ipaddressid=ip_address.ipaddress.id,\n protocol=self.services[\"natrule\"][\"protocol\"],\n cidrlist=['0.0.0.0/0'],\n startport=self.services[\"natrule\"][\"publicport\"],\n endport=self.services[\"natrule\"][\"publicport\"]\n )\n self.cleanup.append(fwr)\n\n # Create Static NAT rule\n StaticNATRule.enable(\n self.apiclient,\n ip_address.ipaddress.id,\n self.virtual_machine.id,\n self.defaultNetworkId\n )\n\n try:\n logger.debug(\"SSHing into VM with IP address %s with NAT IP %s\" %\n (\n self.virtual_machine.ipaddress,\n ip_address.ipaddress.ipaddress\n ))\n self.virtual_machine.get_ssh_client(ip_address.ipaddress.ipaddress)\n except Exception as e:\n self.fail(\n \"SSH Access failed for %s: %s\" %\n (self.virtual_machine.ipaddress, e)\n )\n\n StaticNATRule.disable(\n self.apiclient,\n ip_address.ipaddress.id,\n self.virtual_machine.id\n )", "def test_create_host_subnet(self):\n pass", "def is_valid_ipv4_address(address):\n invalid_list = ['0.0.0.0','255.255.255.255']\n try:\n ip = ipaddress.IPv4Address(address)\n if (ip.is_reserved) or (ip.is_multicast) or (ip.is_loopback) or (address in invalid_list):\n return False\n except ipaddress.AddressValueError:\n return False\n\n return True", "def test_insufficient_space(self):\n\n self._test_find_next_subnet(\n network=\"10.0.0.0/24\",\n subnets=[\"10.0.0.64/25\"],\n requests=[25],\n expected=None,\n )" ]
[ "0.73434836", "0.6952464", "0.6907861", "0.67323136", "0.67003703", "0.65595746", "0.65477157", "0.6545432", "0.64953184", "0.6441315", "0.6403229", "0.6263367", "0.62485737", "0.6163018", "0.6136902", "0.6079953", "0.5962481", "0.5961101", "0.5927232", "0.5911758", "0.5902653", "0.5898684", "0.5889632", "0.58718187", "0.58271235", "0.58204734", "0.58161163", "0.5808385", "0.58079004", "0.5805499" ]
0.76985526
0
Function implements the sieve of Eratosthenes (for all numbers up to N). Returns array erat_sieve; if erat_sieve[i] is True, then 2i + 3 is a prime.
def sieve_of_erat(N):
    erat_sieve = [True]*int(N/2)
    prime_list = []
    prime_list.append(2)
    for i in range(int((math.sqrt(N)-3)/2)+1):  # Only need to run till sqrt(n)
        if erat_sieve[i] == True:
            j = i + (2*i+3)
            while j < int(N/2):
                erat_sieve[j] = False
                j += (2*i+3)
    for i in range(int(N/2)):
        if erat_sieve[i] == True:
            prime_list.append(2*i+3)

    return erat_sieve, prime_list
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def primeSieve(n):\n result = []\n sieve = array.array('i', (True for i in range(0, n+1)))\n for k in range(2, n+1):\n if sieve[k]:\n result.append(k)\n i = k * k\n while i <= n:\n sieve[i] = False\n i += k\n return result", "def eratosthenes_sieve(n):\r\n\tnumbers = [True for i in range(n + 1)]\r\n\t\r\n\tp = 2\r\n\twhile (p**2 <= n):\r\n\t\tif numbers[p]:\r\n\t\t\tfor i in range(p**2, n + 1, p):\r\n\t\t\t\tnumbers[i] = False\r\n\t\tp += 1\r\n\t\t\r\n\tprimes = compress(range(2, n + 1),numbers[2:])\r\n\treturn list(primes)", "def sieve_of_erat(N):\n lim = int(N/2)\n if N % 2 == 0:\n lim -= 1\n erat_sieve = [True]*lim\n prime_list = []\n prime_list.append(2)\n for i in range(int((sqrt(N)-3)/2)+1): # Only need to run till sqrt(n)\n if erat_sieve[i] == True:\n j = i + (2*i+3)\n while j < lim:\n erat_sieve[j] = False\n j += (2*i+3)\n for i in range(lim):\n if erat_sieve[i] == True:\n prime_list.append(2*i+3)\n \n return erat_sieve, prime_list", "def primeSieve(n):\n\tsieve = numpy.ones(n/3 + (n%6==2), dtype=numpy.bool)\n\tfor i in xrange(1,int(n**0.5)/3+1):\n\t\tif sieve[i]:\n\t\t\tk=3*i+1|1\n\t\t\tsieve[ k*k/3 ::2*k] = False\n\t\t\tsieve[k*(k-2*(i&1)+4)/3::2*k] = False\n\treturn numpy.r_[2,3,((3*numpy.nonzero(sieve)[0][1:]+1)|1)]", "def primes(n):\n sieve = [True] * n\n for i in range(3, int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in range(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*int(((n-i*i-1)/(2*i)+1))\n return [2] + [i for i in range(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in range(3, int(n ** 0.5) + 1, 2):\n if sieve[i]:\n sieve[i * i::2 * i] = [False] * int(((n - i * i - 1) // (2 * i) + 1))\n return [2] + [i for i in range(3, n, 2) if sieve[i]]", "def EratosthenesSieve(N):\n numbers = [True] * (N + 1)\n max_p = int(math.sqrt(N))\n for p in (i for i in range(2, max_p + 1) if numbers[i]):\n for q in range(p * p, N + 1, p):\n numbers[q] = False\n return [i for i in range(2, N + 1) if numbers[i]]", "def EratosthenesSieve(N):\n numbers = [True] * (N+1)\n max_p = int(math.sqrt(N))\n for p in (i for i in range(2, max_p+1) if numbers[i]):\n for q in range(p*p, N+1, p):\n numbers[q] = False\n return [i for i in range(2, N+1) if numbers[i]]", "def sieve_for_primes_to(n):\n sieve = [True] * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\n return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]]", "def sieve(n):\n #All even numbers except 2 are not primes\n primes = [False, False, True] + [True, False] * (n / 2)\n\n #Start with 3\n p = 3\n\n while p*p <= n:\n if primes[p]:\n #p is prime, cross off all multiples of p, starting at the square \n #of p since all smaller multiples have already been crossed off\n d = p*p\n while d <= n:\n primes[d] = False\n d += p\n p += 2\n\n #Build a list 
of the primes we've found\n return [i for i in range(n) if primes[i]]", "def sieve(n):\n if n < 2:\n return []\n s = [True] * (n + 1)\n s[0], s[1] = False, False\n sq = int(n ** 0.5)\n for i in range(2, sq + 1):\n if s[i]:\n m = n // i - i\n s[i * i : n + 1 : i] = [False] * (m + 1)\n return [i for i in range(n + 1) if s[i]]", "def sieve(n):\n s = [True] * (n + 1)\n for i in range(2, isqrt(n) + 1):\n if s[i]:\n for j in range(i + i, n + 1, i):\n s[j] = False\n return [i for i in range(2, n + 1) if s[i]]", "def prime_sieve(n):\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i*j < n:\n primes[i*j] = False\n return primes", "def sieve(n):\n global primes; lower = len(primes)\n if n+1 > lower:\n primes += [True, False] * ((n-lower)/2+1)\n for i in xrange(3, int(math.sqrt(n)+1), 2):\n if primes[i]:\n for j in xrange(3*i, n+1, 2*i):\n if j >= lower:\n primes[j] = False\n return [i for i, is_prime in enumerate(primes) if is_prime]", "def sieve(n):\n\n primes = []\n sieve = [0] * n\n\n for i in range(2, n):\n if sieve[i] == 0:\n primes.append(i)\n sieve[i*i:n:i] = [1] * slice_length(i*i, n, i)\n\n return primes", "def primes_from_2_to(n):\n sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)\n for i in range(1, int(n ** 0.5) // 3 + 1):\n k = 3 * i + 1 | 1\n sieve[k * k // 3::2 * k] = False\n sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]", "def primes(n):\n\tsieve = [True] * n\n\tyield 2\n\tfor i in xrange(3,int(n**0.5)+1,2):\n\t\tif sieve[i]:\n\t\t\tyield i\n\t\t\tsieve[i*i::2*i] = [False]*((n-i*i-1)/(2*i)+1)\n\tfor i in xrange(i+2,n,2):\n\t\tif sieve[i]: yield i", "def sieve_of_eratosthenes(n):\n primes = [True] * (n + 1)\n # because p is the smallest prime\n p = 2\n\n while p * p <= n:\n # if p is not marked as False, it is a prime\n if primes[p]:\n # mark all the multiples of number as False\n for i in range(p * 2, n + 1, p):\n primes[i] = False\n p += 1\n\n # getting all primes\n primes = [element for element in range(2, n + 1) if primes[element]]\n\n return primes", "def sieve(n):\n\tif n < 2:\n\t\treturn []\n\telse:\n\t\tis_prime = [True] * n\n\t\tis_prime[0] = is_prime[1] = False\n\t\tfor i in range(2, n):\n\t\t\tif is_prime[i]:\n\t\t\t\tyield i\n\t\t\t\tfor num in range(i*i, n, i):\n\t\t\t\t\tis_prime[num] = False", "def primesfrom2to(n):\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in range(int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)//3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def numpy_sieve(num):\n # array of True values for sieve\n primes = np.ones(num, dtype=bool)\n\n # 0 and 1 are not prime\n primes[0] = primes[1] = False\n\n # filter out non-prime values\n for i in range(2, int(np.sqrt(num) + 1)):\n if primes[i]:\n primes[i * i :: i] = False\n\n # extract prime numbers\n primes = np.flatnonzero(primes)\n\n return primes", "def primesfrom2to(n):\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n for i in range(1,int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k//3 ::2*k] = False\n sieve[k*(k-2*(i&1)+4)//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0][1:]+1)|1)]", "def primes1(n):\n sieve = [True] * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\n return [2] + 
[2*i+1 for i in range(1,n//2) if sieve[i]]", "def primesfrom2to(n):\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\n for i in xrange(1,int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k/3 ::2*k] = False\n sieve[k*(k-2*(i&1)+4)/3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0][1:]+1)|1)]", "def sieve(upto):\n return list(prime_numbers(upto))", "def primesfrom2to(n):\n sieve = np.ones(n // 3 + (n % 6 == 2), dtype=np.bool)\n for i in range(1, int(n ** 0.5) // 3 + 1):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[k * k // 3::2 * k] = False\n sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]" ]
[ "0.7732913", "0.7636703", "0.7548963", "0.7507189", "0.7439305", "0.7436849", "0.7405845", "0.73973405", "0.73973405", "0.7349964", "0.73457396", "0.7325795", "0.7229576", "0.72279704", "0.7197789", "0.7099972", "0.7085385", "0.70812255", "0.7055499", "0.70475835", "0.70274305", "0.70205", "0.7012367", "0.69902474", "0.6985364", "0.696602", "0.69618267", "0.6911268", "0.69068456", "0.6897834" ]
0.77103347
1
Handle file argument(s) from the command line. This method takes the string(s) passed to the CLI that indicate the files on which to operate. It expands the path arguments and creates a list of `pathlib.Path` objects that unambiguously point to the files indicated by the CLI arguments.
def handle_files_args(*paths_args):
    paths = []

    for paths_arg in paths_args:
        # Handle paths implicitly rooted at user home dir
        paths_arg = os.path.expanduser(paths_arg)

        # Expand wildcards
        paths_arg = glob.glob(paths_arg)

        # Create list of pathlib.Path objects
        paths.extend([pathlib.Path(path_arg) for path_arg in paths_arg])

    return paths
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parser():\n \n \n parser = ap.ArgumentParser(description='Parsing some file names in various forms')\n group = parser.add_mutually_exclusive_group()\n group.add_argument('-f','--filepaths',dest='filepaths',metavar='PATH1,PATH2,...',type=str,\n help='Input a string or list of strings. ex. '\n '\"C:\\\\Users\\\\Matt\\\\Desktop\\\\FireEyeCodingSample\\\\learn.txt\",'\n '\"foo\\\\bar\\\\fudge\"''Note: On Unix systems, you need quotes or you need to escape your escape characters.'\n 'Otherwise, your resultant tokens may be incorrect.')\n group.add_argument('-i','--input',dest='file',metavar='TEXTFILE',type=str,\n help='Input a valid file path to a text file of '\n 'paths. ex. '\n '\"C:\\\\Users\\\\Matt\\\\Desktop\\\\FireEyeCodingSample\\\\learn.txt\"')\n \n \n args = parser.parse_args()\n\n if args.file and not os.path.isfile(args.file):\n raise OSError('This file does not exist - please input a valid file path')\n \n return args", "def main(self):\n\n argprs = argvparse.Argparse()\n\n lines = []\n\n if not argprs.files:\n self.read_user_input()\n lines.append(self.commandline)\n self.send_lines_to_finditer(argprs.regex, lines,\n argprs.underscore, argprs.color, argprs.machine)\n else:\n # print argprs.files\n for fl in argprs.files:\n try:\n filerd = fileread.Fileread(fl)\n self.send_lines_to_finditer(argprs.regex, filerd.lines,\n argprs.underscore, argprs.color, argprs.machine,\n filerd.shortfilename)\n except Exception as e:\n print str(e), \"\\n\"", "def preprocess_argument_list(args: List[str], cwd: Union[str, None] = None) -> List[str]:\n\n if cwd is None:\n cwd = os.getcwd()\n\n result = []\n\n while len(args) > 0:\n arg = args.pop(0)\n\n if arg == \"-I\" or arg == \"--include\":\n if len(args) == 0:\n raise click.ClickException(\"include option must be followed by a file path\")\n else:\n # include statement in files are relative to that file, so we need to\n # provide the file's path to ourselves\n file_path = args.pop(0)\n if not os.path.isabs(file_path):\n file_path = os.path.join(cwd, file_path)\n dir_path = os.path.dirname(file_path)\n\n with open(file_path, \"r\") as f:\n result.extend(preprocess_argument_list(extract_arguments(f), dir_path))\n else:\n result.append(arg)\n\n return result", "def command_line(argv):\r\n arguments = parse_command_line(argv)\r\n return edit_files(arguments.patterns, arguments.expressions,\r\n start_dir=arguments.start_dir,\r\n max_depth=arguments.max_depth,\r\n dry_run=arguments.dry_run,\r\n output=arguments.output)", "def args_to_input_file_list(arg):\n # Check if the input file is a directory.\n if os.path.isdir(arg[0]):\n print \"Provided directory.\"\n file_list = [arg[0] + \"/\" + \n f for f in os.listdir(arg[0])]\n else:\n file_list = arg\n return file_list", "def _parse_arguments():\n import argparse\n\n parser = argparse.ArgumentParser(description=__doc__)\n\n parser.add_argument(\n 'list_of_files', type=str,\n help='Input ASCII file with a list of files to be downloaded')\n\n return parser.parse_args()", "def parse_command_line_args(self) -> None:\n self.parser.add_argument(\n \"-i\",\n \"--input\",\n help=\"(str) [default: .] The relative folder path with the csv files\",\n default=getcwd()\n )\n self.parser.add_argument(\n \"-o\",\n \"--output\",\n help=\"(str) [default: .] 
The folder path for saving the *.alfredsnippets files\",\n default=getcwd()\n )\n self.parser.add_argument(\n \"-f\",\n \"--fieldorder\",\n help=\"(str) [default: 'abbreviation, content, name'] A comma separated list for the order of the fields \"\n \"of the csv files\",\n default=\"abbreviation, content, name\"\n )\n self.parser.add_argument(\n \"-d\",\n \"--deletefolders\",\n help=\"(bool) [default=False] Delete the folders that contains the json files\",\n type=self.str2bool,\n nargs='?',\n const=True,\n default=False\n )\n self.parser.add_argument(\n \"-l\",\n \"--lplaceholder\",\n help=\"(str) [default: %] The left side placeholder for the embedded snippets.\",\n default=\"%\"\n )\n self.parser.add_argument(\n \"-r\",\n \"--rplaceholder\",\n help=\"(str) [default: %] The right side placeholder for the embedded snippets.\",\n default=\"%\"\n )\n\n self.parser.add_argument(\n \"-c\",\n \"--changeplaceholders\",\n help=\"(bool) [default=True] Set to false if the placeholder shouldn't get changed at all\",\n type=self.str2bool,\n nargs='?',\n const=True,\n default=True\n )\n\n self.args = self.parser.parse_args()", "def _parse_args_files(self, filematch):\n files, start_pos = [], 0\n while True:\n pos_a = self.cmd.find(filematch, start_pos)\n if pos_a > 0:\n pos_b = self.cmd.find(' ', pos_a)\n if pos_b > 0:\n files.append(self.cmd[pos_a:pos_b])\n else:\n files.append(self.cmd[pos_a:])\n start_pos = pos_b\n else:\n return files", "def _parse_launch_args(args, logger):\n if not args:\n return []\n\n if isinstance(args, str):\n args = [args]\n\n override_list = []\n\n # Now look for any option and conf file arguments:\n bad = False\n for arg in args:\n m = re.match(r'''(?x)\n (?P<section>[a-zA-Z][a-zA-Z0-9_]*)\n \\.(?P<option>[^=]+)\n =(?P<value>.*)$''', arg)\n # check if argument is a explicit variable override\n if m:\n section = m.group('section')\n key = m.group('option')\n value = m.group('value')\n override_list.append((section, key, value))\n continue\n\n filepath = arg\n # check if argument is a path to a file that exists\n if not os.path.exists(filepath):\n logger.error(f'Invalid argument: {filepath}')\n bad = True\n continue\n\n # expand file path to full path\n filepath = os.path.realpath(filepath)\n\n # path exists but is not a file\n if not os.path.isfile(filepath):\n logger.error(f'Conf is not a file: {filepath}')\n bad = True\n continue\n\n # warn and skip if file is empty\n if os.stat(filepath).st_size == 0:\n logger.warning(f'Conf file is empty: {filepath}. 
Skipping')\n continue\n\n # add file path to override list\n override_list.append(filepath)\n\n # exit if anything went wrong reading config arguments\n if bad:\n sys.exit(2)\n\n return override_list", "def parse_args():\n parser = argparse.ArgumentParser()\n \n parser.add_argument('--p', dest='path_in',\n action='store', type=str, required=True, default='',\n help=\"Path relative to the data/ directory, to the input ATL01, ANC13, and ANC27 files.\")\n parser.add_argument('--atl01', dest='atl01_file',\n action='store', type=str, required=False, default=None,\n help=\"Path + filename to directory of the ATL01.\")\n parser.add_argument('--anc13', dest='anc13_path',\n action='store', type=str, required=False, default=None,\n help=\"Path to outputs directory of the ANC13.\") \n parser.add_argument('--anc27', dest='anc27_path',\n action='store', type=str, required=False, default=None,\n help=\"Path to directory of the ANC27.\")\n\n args = parser.parse_args()\n \n return args", "def parse_args():\r\n parser = argparse.ArgumentParser(description=\"Available Options\")\r\n\r\n parser.add_argument('-i'\r\n ,'--input_path'\r\n ,dest='input_path'\r\n ,type=is_valid_path\r\n ,required=True\r\n ,help = \"Enter the path of the image file to process\")\r\n\r\n args = vars(parser.parse_args())\r\n\r\n #To Display The Command Line Arguments\r\n print(\"## Command Arguments #################################################\")\r\n print(\"\\n\".join(\"{}:{}\".format(i,j) for i,j in args.items()))\r\n print(\"######################################################################\")\r\n\r\n return args", "def arg_parse(args):\r\n path = find_arg('-file', args)\r\n names = args\r\n if path is not None:\r\n names.remove('-file')\r\n names.remove(path)\r\n if len(names) is 0:\r\n names = ['Player']\r\n return names, path", "def _input_args(self, args: List[str]):\n assert self._call is None, f\"You need to specify all inputs before calling `{self._call}`\"\n assert isinstance(args, list), f\"{args} is a {type(args)}, expected a list of strings!\"\n assert len(args) > 0, f\"Expected a non-empty argument list!\"\n assert all(isinstance(a, str) for a in args), f\"Expected a list of strings, not {[type(a) for a in args]}!\"\n # all arguments could potentially be filenames that we write to, so let's just add them\n self._write_files |= set(args)\n # add dummy argument zero\n args = [\"\"] + args\n # allocate args in memory\n arg_strings = [self._str(a, \"arg\") for a in args]\n # allocate a pointer array for argv\n self.data += [f\"argv: .word \" + \" \".join(\"0\" for _ in range(len(args)))]\n # load argc and argv\n self._args += [\"\", \"# argument count in a0\", f\"li a0, {len(args)}\"]\n self._args += [\"\", \"# load pointers to argument strings into argv\", f\"la a1, argv\"]\n for ii, aa in enumerate(arg_strings):\n self._args += [f\"la t1, {aa}\", f\"sw t1, {ii * 4}(a1)\"]", "def handle_cmdline_args():\n\n parser = argparse.ArgumentParser(\n description='Generate synthetic data from a specification in a json '\n 'file using the \"synth-method\" described in the json file. ')\n\n parser.add_argument(\n '-i', dest='infile', required=True,\n help='The input json file. 
Must contain a \"synth-method\" property')\n\n parser.add_argument(\n '-o', dest='outfile_prefix', required=True, help='The prefix of the output paths (data json and csv), relative to the QUIPP-pipeline root directory')\n\n args = parser.parse_args()\n return args", "def parse_argument():\n parser = argparse.ArgumentParser(description='Parsing a file.')\n parser.add_argument('--path', nargs=1, required=True)\n args = vars(parser.parse_args())\n return args", "def get_args():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"files\", nargs=\"+\")\n # save figs\n parser.add_argument(\"-s\", \"--save\", action=\"store_const\", const=True, default=False)\n return parser.parse_args()", "def command_line_arguments():\n\n try:\n parser = argparse.ArgumentParser(description='Log Handler/Cleaner/Copier for Idemia DocAuth')\n\n # Add required arguments.\n parser.add_argument('action', choices=['clean', 'download'], type=str, help='clean or download')\n\n # Parse the arguments\n args = parser.parse_args()\n\n return args\n\n except Exception as err:\n print(err)\n return", "def process_args(args, multiple_chrs=False, tool_name=\"\"):\r\n # Retrieves the dataset.\r\n dataset, chrom, tool = None, None, None\r\n\r\n dataset = DATASETS.get(args.dataset, None)\r\n if not dataset:\r\n print \"Unknown dataset %s.\" % args.dataset\r\n return dataset, chrom, tool\r\n dataset.set_work_dir(args.path)\r\n\r\n # Retreieves the Chromosome(s).\r\n if multiple_chrs:\r\n chrom = [dataset.get_chr(chr_num) for chr_num in args.chrs]\r\n else:\r\n chrom = dataset.get_chr(args.chr)\r\n\r\n # Retrieves the tool.\r\n if tool_name:\r\n full_name = get_tool(tool_name)\r\n if not full_name:\r\n return dataset, chrom, tool\r\n tool = TOOLS[full_name]\r\n tool.set_work_dir(args.path)\r\n\r\n return dataset, chrom, tool", "def add_positional_arguments(arguments_parser):\n arguments_parser.add_argument('paths', type=str, nargs='*')", "def parse_arguments():\n parser = ArgumentParser()\n parser.add_argument(\"-p\", \"--path\", required=True, nargs=1,\n dest=\"file_path\", type=str, action=\"store\",\n help=\"path to the root directory\")\n parser.add_argument(\"-q\", \"--quick\", action=\"store_true\",\n default=False, dest=\"quick\",\n help=\"Speed up the search or not\")\n return parser.parse_args()", "def parse_args():\n parser = ArgumentParser(description='Count lines.')\n\n parser.add_argument(\n 'path',\n metavar='PATH')\n\n parser.add_argument(\n 'patterns',\n metavar='PATTERN',\n nargs='*')\n\n parser.add_argument(\n '-a', '--absolute',\n dest='absolute',\n action='store_true',\n help='show absolute paths in details (overrides `-r`)')\n\n parser.add_argument(\n '-d', '--details',\n dest='details',\n action='store_true',\n help='show details for each file')\n\n parser.add_argument(\n '-r', '--relative',\n dest='relative',\n action='store_true',\n help='show relative paths in details')\n\n return parser.parse_args()", "def __parse_args(self):\n for argument in self.args:\n source_arg = re.match(\"^(--source=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n input_arg = re.match(\"^(--input=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n stats_arg = re.match(\"^(--stats=(([A-Z]|[a-z]|/|_|[0-9]|.)+))$\", argument)\n help_arg = re.match(\"^--help$\", argument)\n vars_arg = re.match(\"^--vars$\", argument)\n insts_arg = re.match(\"^--insts$\", argument)\n if source_arg:\n self.sourceFile = source_arg.group(2)\n self.passedArgs.append(\"source\")\n elif input_arg:\n self.inputFile = input_arg.group(2)\n 
self.passedArgs.append(\"input\")\n elif help_arg:\n print(\"napoveda\")\n sys.exit(0)\n elif stats_arg:\n self.statsFile = stats_arg.group(2)\n self.passedArgs.append(\"stats\")\n elif vars_arg:\n self.passedArgs.append(\"vars\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"vars\"\n elif insts_arg:\n self.passedArgs.append(\"insts\")\n if self.first_stat_arg is None:\n self.first_stat_arg = \"insts\"\n else:\n raise ArgError(\"Unknown argument or format of the argument! (\" + argument + \")\")", "def do_files(self, args):\n file_names = self.regexprutils.get_file_names()\n print 'File names:'\n for name in file_names:\n print ' %s' % (name, )", "def parse_args():\n parser = argparse.ArgumentParser(\n description=\"Trim spaces at the end of every lines.\"\n )\n parser.add_argument(\"-R\", \"-r\", action=\"store_true\", help=\"Whether to recursive\")\n parser.add_argument(\"-y\", \"--yes\", action=\"store_true\", help=\"No ask\")\n parser.add_argument(\n \"-t\", \"--type\", default=\"*\", help=\"Filter file type(Example: *.py)\"\n )\n parser.add_argument(\"-d\", \"--dir\", default=\"\", help=\"The directory path\")\n parser.add_argument(\n \"files\",\n nargs=\"+\",\n default=[],\n metavar=\"*.py\",\n help=\"files or directories\",\n )\n return parser.parse_args()", "def _parse_args():\n parser = argparse.ArgumentParser(description='Run DAFI.')\n parser.add_argument('input_file', help='Name (path) of input file')\n return parser.parse_args()", "def parse_arguments(args):", "def get_filenames_from_args() -> tuple[str, str]:\n args = sys.argv\n\n try:\n words_filename = args[1]\n qwerty_filename = args[2]\n\n return words_filename, qwerty_filename\n except IndexError:\n msg = \"Both file paths were not provided.\\n\"\\\n + \"Run the program using the following command format:\\n\"\\\n + \"python3 spellfixer.py <words-filepath> <keyboards-filepath>\"\n print(msg)\n exit(1)", "def test_argparser():\n for template in templates:\n argv = [template] + list(example_xyz_files)\n\n parser = argparser()\n parser.parse_args(argv)", "def test_file_args(self):\n spec = {\n 'input': [\n '$[[codeFile]]'\n ]\n }\n parameters = pd.create_parameter_index([\n {\n 'id': 'codeFile',\n 'datatype': 'file',\n 'defaultValue': 'src/helloworld.py',\n 'as': 'code/helloworld.py'\n }\n ])\n # Test default values (no arguments)\n wf = tmpl.replace_args(\n spec=spec,\n parameters=parameters,\n arguments=dict()\n )\n assert wf['input'] == ['code/helloworld.py']\n # Test default values (with arguments)\n wf = tmpl.replace_args(\n spec=spec,\n parameters=parameters,\n arguments=pr.parse_arguments(\n arguments={'codeFile': FileHandle(filepath='/dev/null')},\n parameters=parameters\n )\n )\n assert wf['input'] == ['code/helloworld.py']\n # Test file parameters without constant value\n parameters = pd.create_parameter_index([\n {\n 'id': 'codeFile',\n 'datatype': 'file',\n 'defaultValue': 'src/helloworld.py'\n }\n ])\n # Test default values (no arguments)\n wf = tmpl.replace_args(\n spec=spec,\n parameters=parameters,\n arguments=dict()\n )\n assert wf['input'] == ['src/helloworld.py']\n wf = tmpl.replace_args(\n spec=spec,\n parameters=parameters,\n arguments=pr.parse_arguments(\n arguments={'codeFile': FileHandle(filepath='/dev/null')},\n parameters=parameters\n )\n )\n assert wf['input'] == ['null']", "def ProcessCommandLine():\n\tparser = argparse.ArgumentParser(description=__doc__.strip())\n\n\tparser.add_argument('--calibrate', action='store_true', default=False,\n help='Create pixel position calibration 
using camera')\n\tparser.add_argument('--calibrate-rgb', metavar='R,G,B', type=str, default='50,50,50',\n help='Calibration RGB value 0-255,0-255,0-255')\n \tparser.add_argument('--calibration-name', metavar='FILENAME', type=str, default=None,\n help='Pixel calibration name to create/read')\n\tparser.add_argument('--plot', action='store_true', default=False,\n help='Plot the calibration')\n\tparser.add_argument('--clear', action='store_true', default=False,\n help='Switch of all pixels')\n\tparser.add_argument('--debug', action='store_true', default=False,\n\t\t\t help='Print back trace in event of exception')\n\n\n\treturn parser.parse_args()" ]
[ "0.6453815", "0.6248218", "0.6234054", "0.6233103", "0.6151754", "0.60948", "0.59932786", "0.59910625", "0.594788", "0.5934068", "0.5930487", "0.5909474", "0.58889955", "0.587302", "0.585867", "0.58419037", "0.58233815", "0.5807165", "0.57825506", "0.5780803", "0.5777409", "0.57502365", "0.5738378", "0.57374895", "0.5724163", "0.5700969", "0.56781864", "0.5661558", "0.5648501", "0.5647225" ]
0.6813794
0
Factory method to return a child of RefFile. This method returns either a BibFile or a NonbibFile object, depending on whether the `path` arg points to a file containing valid BibTeX or invalid BibTeX, respectively.
def reffile_factory(path):
    try:
        b = BibFile(path)
    except UnparseableBibtexError:
        b = NonbibFile(path)
    return b
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fileobj(path_or_file, mode='r'):\n if isinstance(path_or_file, basestring):\n try:\n return open(path_or_file, mode)\n except:\n return closing(StringIO())\n else:\n return closing(path_or_file)", "def get(self, path, content=True, type=None, format=None):\n result = super().get(path, content, type, format)\n if type is None:\n type = self.guess_type(path)\n elif type != \"notebook\":\n return result\n # Now only handling notebooks\n if content:\n # look for the splitted file\n splitted_uri = self._get_splitted_uri(path)\n if self.file_exists(splitted_uri):\n splitted_data = super().get(splitted_uri, True, \"file\")\n result = self._merge_model(result, splitted_data)\n\n return result", "def get_file(self, path):\n b_file = self.get_b_file(path)\n if b_file:\n return b_file\n return self.get_a_file(path)", "def from_path(cls, ref_full_path, **kwargs):\n try:\n with open(ref_full_path) as fp:\n obj = yaml.load(fp, Loader=YamlLoader)\n _kwargs = {key: value for key, value in obj.items() if key not in ('data', 'from_base64')}\n kwargs.update(_kwargs)\n return cls(obj['data'], from_base64=True, **kwargs)\n\n except IOError as ex:\n if ex.errno == errno.ENOENT:\n return None", "def file(self, path: str) -> File:\n _args = [\n Arg(\"path\", path),\n ]\n _ctx = self._select(\"file\", _args)\n return File(_ctx)", "def create_file_path_field(path: Union[Path, str], path_is_absolute: bool = False) -> Path:\n default_path = build_path(path, path_is_absolute)\n # noinspection Mypy\n field_instance: Path = field(default=default_path, init=False, metadata=metadata_dataclasses_json)\n return field_instance", "def __init__(self, path, ref='HEAD'):\n\n if not isinstance(path, aug.RepoPath):\n path = aug.RepoPath(path)\n\n self.path = path\n self.ref = ref", "def get_file(self, name, internal=True, fieldids=None, fieldnames=None):\n if self.remote:\n return DBSFileRemote(self.remote, name, internal=internal, fieldnames=fieldnames, fieldids=fieldids)\n\n if name.find('::') >= 0:\n # This is a full path name to a subfile. 
\n dd = DD(subfile_path=name)\n else:\n # top-level file - dd entry defines the storage.\n dd = DD(name)\n\n if dd.fileid is None:\n raise FilemanError(\"\"\"DBS.get_file() : File not found [%s]\"\"\" % name)\n return DBSFile(dd, internal=internal, fieldids=fieldids, fieldnames=fieldnames, ext_filename=name)", "def _get_file_object(inputfile=None):\n if type(inputfile) == str:\n return open(inputfile, 'r')\n return inputfile", "def get_file_object(self):\n try:\n # FieldFile.open() and File.open() don't return file objects, so\n # accessing it directly\n return self.datafile.file.file # FileStoreItem.FieldFile.File.file\n except ValueError as exc:\n logger.error(\"Error opening %s: %s\", self.datafile, exc)\n return None", "def construct_bibfile_data(*paths):\n bibs = [reffile_factory(path) for path in paths]\n return bibs", "def try_as_file(inp):\n file = pathlib.Path(inp)\n\n if not file.is_absolute():\n file = pathlib.Path.cwd() / file\n\n if not file.exists():\n return None\n\n try:\n # this will throw if it is a symlink that has a loop in it so that it\n # never points to a base file.\n if file.is_file():\n return file\n except OSError as ex:\n raise Except.FunctionError(\"resolving file '{}' failed: {}\".format(\n file, ex.strerror.lower() ) )\n return None", "def file(self, path: str) -> \"File\":\n _args = [\n Arg(\"path\", path),\n ]\n _ctx = self._select(\"file\", _args)\n return File(_ctx)", "def file(self, path: str) -> \"File\":\n _args = [\n Arg(\"path\", path),\n ]\n _ctx = self._select(\"file\", _args)\n return File(_ctx)", "def _get_file_object(infilename):\n\n _, extension = os.path.splitext(infilename)\n if extension.lower() == '.spe':\n return parsers.SpeFile(infilename)\n elif extension.lower() == '.spc':\n return parsers.SpcFile(infilename)\n elif extension.lower() == '.cnf':\n return parsers.CnfFile(infilename)\n else:\n raise NotImplementedError(\n 'File type {} can not be read'.format(extension))", "def _open(self, file_path=None):\n\t\tif file_path is None:\n\t\t\tfile_path = self.file_path\n\n\t\tif not os.path.exists(file_path):\n\t\t\traise ValueError('Could not find file: {}'.format(file_path))\n\n\t\ttry:\n\t\t\tf = open(file_path, encoding='utf-8', newline='')\n\t\texcept OSError as err:\n\t\t\tself.log.error(str(err))\n\t\t\traise ValueError('Could not open file: {}'.format(file_path))\n\n\t\treturn f", "def read_file(self, base_path, path, content_class=Page, fmt=None,\r\n context=None, preread_signal=None, preread_sender=None,\r\n context_signal=None, context_sender=None):\r\n\r\n path = os.path.abspath(os.path.join(base_path, path))\r\n source_path = os.path.relpath(path, base_path)\r\n logger.debug('read file {} -> {}'.format(\r\n source_path, content_class.__name__))\r\n\r\n if not fmt:\r\n _, ext = os.path.splitext(os.path.basename(path))\r\n fmt = ext[1:]\r\n\r\n if fmt not in self.readers:\r\n raise TypeError(\r\n 'Pelican does not know how to parse {}'.format(path))\r\n\r\n if preread_signal:\r\n logger.debug('signal {}.send({})'.format(\r\n preread_signal, preread_sender))\r\n preread_signal.send(preread_sender)\r\n\r\n reader = self.readers[fmt]\r\n\r\n metadata = default_metadata(\r\n settings=self.settings, process=reader.process_metadata)\r\n metadata.update(path_metadata(\r\n full_path=path, source_path=source_path,\r\n settings=self.settings))\r\n metadata.update(parse_path_metadata(\r\n source_path=source_path, settings=self.settings,\r\n process=reader.process_metadata))\r\n\r\n content, reader_metadata = self.get_cached_data(path, (None, 
None))\r\n if content is None:\r\n content, reader_metadata = reader.read(path)\r\n self.cache_data(path, (content, reader_metadata))\r\n metadata.update(reader_metadata)\r\n\r\n if content:\r\n # find images with empty alt\r\n find_empty_alt(content, path)\r\n\r\n # eventually filter the content with typogrify if asked so\r\n if self.settings['TYPOGRIFY']:\r\n from typogrify.filters import typogrify\r\n if content:\r\n content = typogrify(content)\r\n metadata['title'] = typogrify(metadata['title'])\r\n if 'summary' in metadata:\r\n metadata['summary'] = typogrify(metadata['summary'])\r\n\r\n if context_signal:\r\n logger.debug('signal {}.send({}, <metadata>)'.format(\r\n context_signal, context_sender))\r\n context_signal.send(context_sender, metadata=metadata)\r\n\r\n return content_class(content=content, metadata=metadata,\r\n settings=self.settings, source_path=path,\r\n context=context)", "def to_filehandle(fname, flag='r', return_opened=False, encoding=None):\n if is_string_like(fname):\n fh = Path(fname).open(mode=flag)\n opened = True\n elif isinstance(fname, Path):\n fh = fname.open(mode=flag)\n elif hasattr(fname, 'seek'):\n fh = fname\n opened = False\n else:\n raise ValueError('fname must be a pathlib Path, string or file handle')\n if return_opened:\n return fh, opened\n return fh", "def _open(path):\n try:\n fileobj = File(os.fspath(path))\n if fileobj is None:\n raise MutagenError\n if getattr(fileobj, 'tags', None) is None:\n fileobj.add_tags()\n return fileobj\n except (AttributeError, MutagenError):\n raise exceptions.NotAnAudioFileError(path)", "def load_bib(bib_name):\n print(\"Reading BibTex File: {}\".format(bib_name))\n curdir = osp.abspath('.')\n bib_path = osp.join(curdir, bib_name)\n print(\"Path: {}\".format(bib_path))\n print('Creating library..')\n add_dir('library')\n with open(bib_path, 'r') as f:\n # txt = f.read()\n line = f.readline()\n i = 0\n start = False\n while line:\n i += 1\n if (line.find('@')==1) or start: # reading entry\n if start == False:\n filename = get_name(line)\n start = True\n if line.find('title')==1:\n link = get_link(line)\n if link is not None:\n savepath = osp.join(curdir, 'library', filename+'.pdf')\n save_pdf(link, savepath)\n if (line.find('}')==1): # end of entry\n start=False\n line = f.readline()\n print(i) # print line number", "def _pyre_open(\n self, uri: pyre.primitives.urilike, mode: str, **kwds\n ) -> typing.Optional[File]:\n # parse the {uri}, using {file} as the default scheme\n uri = pyre.primitives.uri.parse(value=uri, scheme=\"file\")\n # and extract the {scheme}\n scheme = uri.scheme\n # if the {scheme} points to a local path\n if scheme == \"file\":\n # make a local {file} object whose path is the {address} of the {uri} and return it\n return File()._pyre_local(uri=uri.address, mode=mode, **kwds)\n # if we get this far, the {uri} was malformed; make a channel\n channel = journal.error(\"pyre.h5.reader\")\n # complain\n channel.line(f\"could not open an h5 file\")\n channel.line(f\"with the given uri '{uri}':\")\n channel.line(f\"the scheme '{scheme}' is not supported\")\n # flush\n channel.log()\n # and bail, in case errors aren't fatal\n return", "def get_file(self, file_path):\n try:\n return self._files[file_path]\n except KeyError:\n file = File()\n self._files[file_path] = file\n return file", "def is_file (self, path=None, ttype=None) :\n if path : return self._adaptor.is_file (path, ttype=ttype)\n else : return self._adaptor.is_file_self ( ttype=ttype)", "def from_pdf(path):\n raw_regexes = [\n 
r\"\"\"<prism:doi>(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)</prism:doi>\"\"\",\n r\"\"\"[\"'](?:doi|DOI):(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)[\"']\"\"\",\n r\"\"\"URI\\s*\\(https?://doi.org/(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\\s*>\"\"\",\n r\"\"\"URI\\s*\\((?:https?://)?www.nature.com/doifinder/(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\\s*>\"\"\",\n # This one works for some ACIE papers, but is too risky. It matches\n # against DOIs of cited papers too. Better to use WPS-ARTICLEDOI.\n # r\"\"\"/URI\\(https?://(?:dx)?.doi.org/(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\"\"\",\n r\"\"\"/WPS-ARTICLEDOI\\s*\\((10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\"\"\",\n r\"\"\"\\((?:doi|DOI):\\s*(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)\\)\"\"\",\n r\"\"\"<rdf:li.+>(?:doi|DOI):(10.\\d{4,9}/[-._;()/:a-zA-Z0-9]+)</rdf:li>\"\"\",\n ]\n regexes = [re.compile(regex) for regex in raw_regexes]\n class _DOIFound(Exception):\n pass\n\n p = Path(path)\n if not (p.exists() or p.is_file()):\n return _error(f\"from_pdf: invalid path '{p}' given\")\n\n strings = subprocess.Popen([\"strings\", p], stdout=subprocess.PIPE)\n grep = subprocess.Popen([\"grep\", \"-i\", \"doi\"], stdin=strings.stdout, stdout=subprocess.PIPE)\n try:\n for line in grep.stdout:\n line = line.decode(_g.gpe).strip()\n for regex in regexes:\n match = regex.search(line)\n if match:\n raise _DOIFound(match.group(1))\n except _DOIFound as e:\n doi = e.args[0]\n # Prune away any extra parentheses at the end.\n nopen = doi.count('(')\n nclose = doi.count(')')\n if nopen != nclose:\n doi = doi.rsplit(')', maxsplit=(nclose - nopen))[0]\n # Report success.\n return DOI(doi)\n else:\n return _error(f\"from_pdf: could not find DOI from '{p}'\")", "def resolve_ref_hierarchy(self, path):\n\n project, ref, refPrefix = self.resolve_partial_ref_prefix(path)\n if not ref:\n return None\n\n refTime = iso8601.parse_date(ref.commit['committed_date']).timestamp()\n\n return Entity(\n EntityType.REF_LEVEL,\n path,\n create_directory_attributes(refTime),\n {'project': project, 'ref': ref, 'refPrefix': refPrefix}\n )", "def _file_handle(file_ref, mode='r'):\n if not mode in 'rw':\n raise ValueError(\"mode must be 'r' or 'w'\")\n\n def _is_string_like(obj):\n try:\n obj + ''\n except (TypeError, ValueError):\n return False\n return True\n\n try:\n if _is_string_like(file_ref):\n if file_ref.endswith('.gz'):\n import gzip\n fh = gzip.open(file_ref, mode='%sb' % mode)\n else:\n if mode == 'r':\n fh = open(file_ref, 'U')\n else:\n fh = open(file_ref, 'w')\n else:\n fh = file_ref\n except TypeError:\n raise ValueError('input file must be a path or file handle')\n\n return fh", "def __init__(self, file_path: Optional[Union[str, os.PathLike]] = None):\n path = Path(file_path).resolve() if file_path else None\n self._path = path\n self._path_spec = None", "def get_opener(path):\n path = pathlib.Path(path)\n return _FILE_FORMATS.get(path.suffix, open)", "def resolve_repository_entry(self, path):\n\n project, ref, remainingPath = self.resolve_ref_prefix(path)\n if not ref or remainingPath.as_posix() == '.':\n return None\n\n # List parent directory to retrieve entry attributes\n entry = self.get_entry_properties(project, ref, remainingPath.as_posix())\n\n # Approximate entry age by last commit to containing ref\n refTime = iso8601.parse_date(ref.commit['committed_date']).timestamp()\n\n if entry != None:\n if entry['type'] == 'blob':\n fileSize = self.cache.get_file_size(project, ref, remainingPath.as_posix())\n\n # Approximate file age more accurately by its last commit timestamp\n if self.commitTimes:\n 
entryTime = self.cache.get_file_commit_timestamp(project, ref, remainingPath.as_posix())\n else:\n entryTime = refTime\n\n # Convert mode and strip write bits\n permissions = int(entry['mode'][-3:], 8) & 0o555\n\n return Entity(\n EntityType.REPOSITORY_FILE,\n path,\n create_file_attributes(permissions, entryTime, fileSize),\n {'project': project, 'ref': ref, 'file': entry}\n )\n elif entry['type'] == 'tree':\n return Entity(\n EntityType.REPOSITORY_DIR,\n path,\n create_directory_attributes(refTime),\n {'project': project, 'ref': ref, 'directory': entry}\n )\n\n return None", "def __init__(\n self,\n file_or_path: Union[str, Path, BinaryIO] = './',\n mode: str = 'r',\n *,\n distname: Optional[str] = None,\n version: Optional[Union[str, Version]] = None,\n build_tag: Optional[Union[int, str]] = None,\n language_tag: Optional[str] = None,\n abi_tag: Optional[str] = None,\n platform_tag: Optional[str] = None\n ) -> None:\n assert not isinstance(file_or_path, io.TextIOBase), (\n \"Text buffer given where a binary one was expected.\"\n )\n\n if 'a' in mode:\n # Requires rewrite feature\n raise NotImplementedError(\n \"Append mode is not supported yet\"\n )\n\n if 'l' in mode:\n raise NotImplementedError(\n \"Lazy modes are not supported yet\"\n )\n\n self.mode = mode\n\n # These might be None in case a corrupted wheel is read in lazy mode\n self.wheeldata: Optional[WheelData] = None\n self.metadata: Optional[MetaData] = None\n self.record: Optional[WheelRecord] = None\n\n if isinstance(file_or_path, str):\n file_or_path = Path(file_or_path)\n\n # TODO if value error, set build_tag to degenerated version, that\n # compares with Version in a way that makes Version the higher one.\n build_tag = int(build_tag) if build_tag is not None else None\n\n if self._is_unnamed_or_directory(file_or_path):\n self._require_distname_and_version(distname, version)\n\n filename = self._get_filename(file_or_path)\n self._pick_a_distname(filename, given_distname=distname)\n self._pick_a_version(filename, given_version=version)\n self._pick_tags(\n filename, build_tag, language_tag, abi_tag, platform_tag\n )\n\n if self._is_unnamed_or_directory(file_or_path):\n assert distname is not None and version is not None # For Mypy\n self._generated_filename = self._generate_filename(\n self._distname, self._version, self._build_tag,\n self._language_tag, self._abi_tag, self._platform_tag\n )\n else:\n self._generated_filename = ''\n\n if isinstance(file_or_path, Path):\n file_or_path /= self._generated_filename\n\n # FIXME: the file is opened before validating the arguments, so this\n # litters empty and corrupted wheels if any arg is wrong.\n self._zip = ZipFile(file_or_path, mode)\n\n # Used by _distinfo_path\n self._distinfo_prefix: Optional[str] = None\n\n if 'w' in mode or 'x' in mode:\n self._initialize_distinfo()\n else:\n self._distinfo_prefix = self._find_distinfo_prefix()\n self._read_distinfo()\n\n if 'l' not in mode:\n self.validate()" ]
[ "0.5633376", "0.5386271", "0.5277653", "0.5108195", "0.5107922", "0.50593334", "0.5048385", "0.49720398", "0.49453267", "0.49376675", "0.49356878", "0.49261934", "0.49225807", "0.49225807", "0.48910007", "0.48887247", "0.48831517", "0.4878487", "0.4875914", "0.4871508", "0.4871276", "0.4869398", "0.4856159", "0.48526156", "0.48257694", "0.4814507", "0.4785805", "0.4783505", "0.47797978", "0.47695497" ]
0.79184014
0
List of data corresponding to individual bib files
def construct_bibfile_data(*paths):
    bibs = [reffile_factory(path) for path in paths]
    return bibs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _FindBibEntries(self):\n bibs = \" \".join(glob.glob(\"*.bib\"))\n cat_process = subprocess.Popen(shlex.split(\"cat %s\" % bibs),\n stdout=subprocess.PIPE)\n grep_process = subprocess.Popen(shlex.split(\"grep ^@\"),\n stdin=cat_process.stdout,\n stdout=subprocess.PIPE)\n cat_process.stdout.close()\n grep2_process = subprocess.Popen(shlex.split(\"grep -vi @string\"),\n stdin=grep_process.stdout,\n stdout=subprocess.PIPE)\n grep_process.stdout.close()\n\n lines = grep2_process.communicate()[0]\n\n ret = []\n for l in lines.split(\"\\n\"):\n ret.append(responses.BuildCompletionData(\n re.sub(r\"@([A-Za-z]*)\\s*{\\s*([^,]*),.*\", r\"\\2\", l)\n )\n )\n return ret", "def bib_sublist(bibfile_data, val_type):\n sublist = [bibfile for bibfile in bibfile_data if isinstance(bibfile.bib, val_type)]\n return sublist", "def getBooks(self):\n srcIds = set([srcId for srcId,altId in self.libMap.values()])\n altIds = set([altId for srcId,altId in self.libMap.values()])\n factory = {'BOOK':Book}\n for modName in mwIniFile.loadOrder:\n print modName\n fileRep = FileRep(modInfos[modName],False)\n fileRep.load(keepTypes=None,factory=factory)\n for record in fileRep.records:\n if record.name == 'BOOK':\n bookId = record.getId()\n if bookId in srcIds:\n print '',bookId\n self.srcBooks[bookId] = (record,modName)\n elif bookId in altIds:\n print '',bookId\n self.altBooks[bookId] = (record,modName)", "def get_bibfiles(folder: str) -> t.List[str]:\n full_pathname = os.path.normpath(os.path.abspath(folder))\n bib_files = []\n for f in os.listdir(full_pathname):\n fullname = os.path.join(full_pathname, f)\n if f.endswith(\".bib\") and os.path.isfile(fullname):\n logging.debug(f'get bibfile \"{f}\" from directory \"{full_pathname}\"')\n bib_files.append(fullname)\n return bib_files", "def list(ffiles):\n ret = {}\n print('Reading: ')\n for ffile in ffiles:\n print(ffile)\n ret[ffile] = data_file(ffile)\n return ret", "def parse_bib_from_list(filename):\n\tentry_regex = r\"TITEL: .*\\s*AUTOR: .*\"\n\tparse_func = make_parse_func(r\"AUTOR: (.*)\", r\"TITEL: (.*)\", None)\n\treturn parse_bib(filename, entry_regex, parse_func)", "def load_files_info(self):\n authors = {}\n paths = sorted(self.zipfile.filelist, key=lambda x: x.date_time)\n for path in paths:\n with self.zipfile.open(path) as fd:\n try:\n data = json.load(fd)\n names = self.extract_author_name(data)\n except json.JSONDecodeError:\n fd.seek(0)\n self.invalid_files[fd.name] = fd.read()\n else:\n authors.update({name: path for name in names})\n\n return authors", "def list_publications(bib_format=\"dict\"):\n\n def get_bibtex(key, value):\n total_keys = [\n \"title\",\n \"journal\",\n \"volume\",\n \"issue\",\n \"number\",\n \"pages\",\n \"numpages\",\n \"year\",\n \"month\",\n \"publisher\",\n \"url\",\n \"doi\",\n \"issn\",\n ]\n bibtex_str = (\n \"@article{\"\n + key\n + \",\\n\"\n + \" author={\"\n + \" and \".join(value[\"author\"])\n + \"},\\n\"\n )\n for key in total_keys:\n if key in value.keys():\n bibtex_str += \" \" + key + \"={\" + value[key] + \"},\\n\"\n bibtex_str += \"}\\n\"\n return bibtex_str\n\n def get_apa(value):\n apa_str = \" & \".join(value[\"author\"])\n if \"year\" in value.keys():\n apa_str += \" (\" + value[\"year\"] + \"). \"\n if \"title\" in value.keys():\n apa_str += value[\"title\"] + \". \"\n if \"journal\" in value.keys():\n apa_str += value[\"journal\"] + \", \"\n if \"volume\" in value.keys():\n apa_str += value[\"volume\"] + \", \"\n if \"pages\" in value.keys():\n apa_str += value[\"pages\"] + \". 
\"\n if \"doi\" in value.keys():\n apa_str += \"doi: \" + value[\"doi\"] + \"\\n\"\n return apa_str\n\n publication_dict = s.publication_lst\n if bib_format.lower() == \"dict\":\n return publication_dict\n elif bib_format.lower() == \"bibtex\":\n total_str = \"\"\n for pub in publication_dict:\n for key, value in pub.items():\n total_str += get_bibtex(key, value)\n return total_str\n elif bib_format.lower() == \"apa\":\n total_str = \"\"\n for pub in publication_dict:\n for key, value in pub.items():\n total_str += get_apa(value)\n return total_str\n else:\n raise ValueError(\"Supported Bibformats are ['dict', 'bibtex', 'apa']\")", "def read_reference_data():\n return {f:read_local_file(f) for f in os.listdir(DATA_DIR)}", "def publication_data():\n with open('data/publications.json') as pubs:\n pubs = json.load(pubs)\n return pubs", "def load_list_of_entries(list_of_files):\n publication_entries = []\n entries = []\n titles = []\n\n for filename in list_of_files:\n entries_list = load_entries(filename)\n\n for e in entries_list:\n if e.main_publication:\n publication_entries.append(e)\n elif e.title not in titles:\n titles.append(e.title)\n entries.append(e)\n\n return publication_entries, entries", "def get_movie_data(files: list) -> list:\n pass", "def loadData():\n project_dir = \"/home/c/chandanchowdhury/Documents/CIS-833/CSSearch/indexer/\"\n\n index_file = \"index_file.pkl\"\n link_file = \"link_file.pkl\"\n\n index_data = loadPickle(project_dir+index_file)\n link_data = loadPickle(project_dir+link_file)\n\n return index_data, link_data", "def get_ancillary_files(self, docmeta: DocMetadata) \\\n -> List[Dict]:\n version = docmeta.version\n format_code = docmeta.version_history[version - 1].source_type.code\n if has_ancillary_files(format_code):\n source_file_path = self._get_source_path(docmeta)\n if source_file_path is not None:\n return list_ancillary_files(source_file_path)\n else:\n return []\n return []", "def pdbfile_list():\n import glob, os\n os.chdir(\"../Data\")\n file_list = []\n for file in glob.glob(\"*.pdb\"):\n file_list.append(file)\n return file_list", "def __get_files(self):\r\n \r\n files = []\r\n with requests.Session() as s:\r\n headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'}\r\n respons = s.get(self.__url, headers=headers).text\r\n soup = BeautifulSoup(respons, 'html.parser')\r\n data_files = [link.get('href') for link in soup.find_all('a', class_=\"btn-primary\")]\r\n for year in soup.find_all('td', class_=\"align-middle\"):\r\n regex = re.compile(r\"data/data-?gis({year}|\\-rok\\-{year})\\.zip\".format(year=year.text))\r\n if any((match := regex.match(link)) for link in data_files):\r\n files.append(match.group(0))\r\n else:\r\n files.append(data_files[-1])\r\n return files", "def pdbfile_list():\n \n import glob, os\n os.chdir(\"../Data\")\n file_list = []\n for file in glob.glob(\"*.pdb\"):\n file_list.append(file)\n return file_list", "def split_bibitems(bibliography):\n \n refs = []\n for filename, bib in bibliography.iteritems():\n split_ind = []\n for ind, item in enumerate(bib):\n if item.startswith(r\"\\bibitem\"):\n split_ind.append(ind)\n \n for ref in partition(bib, split_ind):\n if ref:\n refs.append(RefObj.RefObj(filename, refstr='\\n'.join(ref)))\n return refs", "def find_data(self):\n data_list = []\n for root, dirs, files in os.walk(pathfinder.data_path()):\n for name in files:\n data_list.append(os.path.join(root, name))\n return data_list", "def get_files(self):\n return self.ebook_file.get_files()", "def 
get_ancillary_files(docmeta: DocMetadata) -> List[Dict]:\n return current_session().get_ancillary_files(docmeta)", "def files(self):\n def f():\n return {'count': 0, 'size': 0, 'type': None}\n _files = defaultdict(f)\n\n for s in self.subjects:\n for sa in s.samples:\n for blob in sa.blobs.values():\n # get extension\n type = blob['name'].replace('.gz', '')\n type = type.split('/')[-1].split('.')[-1]\n _files[type]['count'] += 1\n _files[type]['type'] = type.title()\n _files[type]['size'] += blob['size']\n return _files", "def _extract(self):\r\n self._data = []\r\n for fname in self.files:\r\n meta = dict(filename=fname)\r\n\r\n # Perform the actual metadata extraction\r\n fname = os.path.splitext(self.filter_filename(fname))[0]\r\n values = fname.split(self.sep)\r\n\r\n # Handle the case where number of fields is less than the length\r\n # of the extracted values, ie cases where we only want to extract\r\n # a subset of available fields.\r\n if self.index:\r\n values = [val for i, val in enumerate(values) if i in self.index]\r\n\r\n meta.update(dict(zip(self.fields, values)))\r\n if self.split_by in self.fields:\r\n meta[self.split_by] = self._get_split_field_values(meta['filename'])\r\n self._data.append(meta)", "def load_bib(bib_name):\n print(\"Reading BibTex File: {}\".format(bib_name))\n curdir = osp.abspath('.')\n bib_path = osp.join(curdir, bib_name)\n print(\"Path: {}\".format(bib_path))\n print('Creating library..')\n add_dir('library')\n with open(bib_path, 'r') as f:\n # txt = f.read()\n line = f.readline()\n i = 0\n start = False\n while line:\n i += 1\n if (line.find('@')==1) or start: # reading entry\n if start == False:\n filename = get_name(line)\n start = True\n if line.find('title')==1:\n link = get_link(line)\n if link is not None:\n savepath = osp.join(curdir, 'library', filename+'.pdf')\n save_pdf(link, savepath)\n if (line.find('}')==1): # end of entry\n start=False\n line = f.readline()\n print(i) # print line number", "def getData():\n with open('obj/documents.pkl', 'rb') as file:\n data = pickle.load(file)\n return data", "def parse_bib(filename, entry_regex, parse_func):\n\twith open(filename) as f:\n\t\treturn filter(None, (parse_func(item.group()) \n\t\t for item in re.finditer(entry_regex, f.read())))", "def get_files(self):\n\n cur = self.app.conn.cursor()\n sql = \"select distinct case_text.fid, source.name from case_text join source on case_text.fid=source.id where \"\n sql += \"caseid=? 
order by lower(source.name) asc\"\n cur.execute(sql, [self.case['caseid'], ])\n self.casefiles = cur.fetchall()\n sql = \"select id, name, fulltext, mediapath, memo, owner, date, av_text_id from source order by source.name asc\"\n cur.execute(sql)\n self.allfiles = cur.fetchall()\n msg = _(\"Files linked: \") + str(len(self.casefiles)) + \" / \" + str(len(self.allfiles))\n self.ui.label_files_linked.setText(msg)", "def _extract_core_biblio(self, bib):\n try:\n pubnumber = bib_scalar(bib, 'pubnumber')\n pubdate = datetime.strptime(bib_scalar(bib, 'pubdate'), '%Y%m%d')\n fam_raw = bib_scalar(bib, 'family_id')\n family_id = int(fam_raw) if fam_raw != None else fam_raw\n assign_applic_raw = bib.get('assign_applic')\n assign_applic = '|'.join(assign_applic_raw) if len(assign_applic_raw) > 0 else \"\"\n except KeyError, exc:\n raise RuntimeError(\"Document is missing mandatory biblio field (KeyError: {})\".format(exc))\n if len(pubnumber) == 0:\n raise RuntimeError(\"Document publication number field is empty\")\n\n return family_id, pubdate, pubnumber, assign_applic", "def getDocuments(self):\n return self.objectValues('Multifile')", "def get_file_data(filename):" ]
[ "0.7137384", "0.6268261", "0.6240238", "0.6198332", "0.61708087", "0.61559683", "0.61306244", "0.60698205", "0.6049318", "0.5935787", "0.5895451", "0.5891294", "0.58823615", "0.58217996", "0.58115286", "0.57741606", "0.5756594", "0.57499486", "0.5724294", "0.57221454", "0.5721215", "0.5698372", "0.56902593", "0.569025", "0.5687068", "0.56692743", "0.5634584", "0.5623458", "0.5614296", "0.5609963" ]
0.71239555
1
Sublist of bibfile_data whose elements are val_type. This method examines each bib_dict element of a bibfile_data list and returns the subset which can be classified according to val_type.
def bib_sublist(bibfile_data, val_type):
    sublist = [bibfile for bibfile in bibfile_data if isinstance(bibfile.bib, val_type)]
    return sublist
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getvartypelist(self,subj,vartype): # 3\n num_ = None\n if num_ is None:\n num_ = len(subj)\n elif num_ != len(subj):\n raise IndexError(\"Inconsistent length of array subj\")\n if num_ is None: num_ = 0\n if subj is None: raise TypeError(\"Invalid type for argument subj\")\n if subj is None:\n subj_ = None\n else:\n try:\n subj_ = memoryview(subj)\n except TypeError:\n try:\n _tmparr_subj = array.array(\"i\",subj)\n except TypeError:\n raise TypeError(\"Argument subj has wrong type\")\n else:\n subj_ = memoryview(_tmparr_subj)\n \n else:\n if subj_.format != \"i\":\n subj_ = memoryview(array.array(\"i\",subj))\n \n _copyback_vartype = False\n if vartype is None:\n vartype_ = None\n else:\n try:\n vartype_ = memoryview(vartype)\n except TypeError:\n try:\n _tmparr_vartype = array.array(\"i\",vartype)\n except TypeError:\n raise TypeError(\"Argument vartype has wrong type\")\n else:\n vartype_ = memoryview(_tmparr_vartype)\n _copyback_vartype = True\n else:\n if vartype_.format != \"i\":\n vartype_ = memoryview(array.array(\"i\",vartype))\n _copyback_vartype = True\n if vartype_ is not None and len(vartype_) != (num_):\n raise ValueError(\"Array argument vartype has wrong length\")\n res = self.__obj.getvartypelist(num_,subj_,vartype_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if _copyback_vartype:\n for __tmp_var_0 in range(len(vartype_)): vartype[__tmp_var_0] = variabletype(_tmparr_vartype[__tmp_var_0])", "def get_subcase_types(self, case_type):\n return {t for m in self.get_modules()\n if m.case_type == case_type\n for t in m.get_subcase_types()}", "def get_recipes_by_types(self, recipe_type): \n\t\tfor key, val in self.recipes_list.items():\n\t\t\tif key == recipe_type:\n\t\t\t\tfor a, b in val.items():\n\t\t\t\t\tprint(str(b))", "def filter_inspection_type(data, inspection_type):\n return [row for row in data if row['inspection_type'] == inspection_type]", "def parse_var_list(config, time_info=None, data_type=None, met_tool=None,\n levels_as_list=False):\n\n # validate configs again in case wrapper is not running from run_metplus\n # this does not need to be done if parsing a specific data type,\n # i.e. 
ENS or FCST\n if data_type is None:\n if not validate_field_info_configs(config)[0]:\n return []\n elif data_type == 'BOTH':\n config.logger.error(\"Cannot request BOTH explicitly in parse_var_list\")\n return []\n\n # var_list is a list containing an list of dictionaries\n var_list = []\n\n # if specific data type is requested, only get that type\n if data_type:\n data_types = [data_type]\n # otherwise get both FCST and OBS\n else:\n data_types = ['FCST', 'OBS']\n\n # get indices of VAR<n> items for data type and/or met tool\n indices = []\n if met_tool:\n indices = find_var_name_indices(config, data_types, met_tool).keys()\n if not indices:\n indices = find_var_name_indices(config, data_types).keys()\n\n # get config name prefixes for each data type to find\n dt_search_prefixes = {}\n for current_type in data_types:\n # get list of variable prefixes to search\n prefixes = get_field_search_prefixes(current_type, met_tool)\n dt_search_prefixes[current_type] = prefixes\n\n # loop over all possible variables and add them to list\n for index in indices:\n field_info_list = []\n for current_type in data_types:\n # get dictionary of existing config variables to use\n search_prefixes = dt_search_prefixes[current_type]\n field_configs = get_field_config_variables(config,\n index,\n search_prefixes)\n\n field_info = format_var_items(field_configs, time_info)\n if not isinstance(field_info, dict):\n config.logger.error(f'Could not process {current_type}_'\n f'VAR{index} variables: {field_info}')\n continue\n\n field_info['data_type'] = current_type.lower()\n field_info_list.append(field_info)\n\n # check that all fields types were found\n if not field_info_list or len(data_types) != len(field_info_list):\n continue\n\n # check if number of levels for each field type matches\n n_levels = len(field_info_list[0]['levels'])\n if len(data_types) > 1:\n if (n_levels != len(field_info_list[1]['levels'])):\n continue\n\n # if requested, put all field levels in a single item\n if levels_as_list:\n var_dict = {}\n for field_info in field_info_list:\n current_type = field_info.get('data_type')\n var_dict[f\"{current_type}_name\"] = field_info.get('name')\n var_dict[f\"{current_type}_level\"] = field_info.get('levels')\n var_dict[f\"{current_type}_thresh\"] = field_info.get('thresh')\n var_dict[f\"{current_type}_extra\"] = field_info.get('extra')\n var_dict[f\"{current_type}_output_name\"] = field_info.get('output_names')\n\n var_dict['index'] = index\n var_list.append(var_dict)\n continue\n\n # loop over levels and add all values to output dictionary\n for level_index in range(n_levels):\n var_dict = {}\n\n # get level values to use for string substitution in name\n # used for python embedding calls that read the level value\n sub_info = {}\n for field_info in field_info_list:\n dt_level = f\"{field_info.get('data_type')}_level\"\n sub_info[dt_level] = field_info.get('levels')[level_index]\n\n for field_info in field_info_list:\n current_type = field_info.get('data_type')\n name = field_info.get('name')\n level = field_info.get('levels')[level_index]\n thresh = field_info.get('thresh')\n extra = field_info.get('extra')\n output_name = field_info.get('output_names')[level_index]\n\n # substitute level in name if filename template is specified\n subbed_name = do_string_sub(name,\n skip_missing_tags=True,\n **sub_info)\n\n var_dict[f\"{current_type}_name\"] = subbed_name\n var_dict[f\"{current_type}_level\"] = level\n var_dict[f\"{current_type}_thresh\"] = thresh\n var_dict[f\"{current_type}_extra\"] = extra\n 
var_dict[f\"{current_type}_output_name\"] = output_name\n\n var_dict['index'] = index\n var_list.append(var_dict)\n\n # extra debugging information used for developer debugging only\n '''\n for v in var_list:\n config.logger.debug(f\"VAR{v['index']}:\")\n if 'fcst_name' in v.keys():\n config.logger.debug(\" fcst_name:\"+v['fcst_name'])\n config.logger.debug(\" fcst_level:\"+v['fcst_level'])\n if 'fcst_thresh' in v.keys():\n config.logger.debug(\" fcst_thresh:\"+str(v['fcst_thresh']))\n if 'fcst_extra' in v.keys():\n config.logger.debug(\" fcst_extra:\"+v['fcst_extra'])\n if 'fcst_output_name' in v.keys():\n config.logger.debug(\" fcst_output_name:\"+v['fcst_output_name'])\n if 'obs_name' in v.keys():\n config.logger.debug(\" obs_name:\"+v['obs_name'])\n config.logger.debug(\" obs_level:\"+v['obs_level'])\n if 'obs_thresh' in v.keys():\n config.logger.debug(\" obs_thresh:\"+str(v['obs_thresh']))\n if 'obs_extra' in v.keys():\n config.logger.debug(\" obs_extra:\"+v['obs_extra'])\n if 'obs_output_name' in v.keys():\n config.logger.debug(\" obs_output_name:\"+v['obs_output_name'])\n if 'ens_name' in v.keys():\n config.logger.debug(\" ens_name:\"+v['ens_name'])\n config.logger.debug(\" ens_level:\"+v['ens_level'])\n if 'ens_thresh' in v.keys():\n config.logger.debug(\" ens_thresh:\"+str(v['ens_thresh']))\n if 'ens_extra' in v.keys():\n config.logger.debug(\" ens_extra:\"+v['ens_extra'])\n if 'ens_output_name' in v.keys():\n config.logger.debug(\" ens_output_name:\"+v['ens_output_name'])\n '''\n return sorted(var_list, key=lambda x: x['index'])", "def _deserialize_list(data, boxed_type):\n return [_deserialize(sub_data, boxed_type)\n for sub_data in data]", "def get_subset(mlist,year):\n newlist = []\n for entry in mlist:\n if int(entry[0][:4]) > int(year):\n continue\n newvec = entry[:8]\n citations = entry[8]['citations']\n citations = filter(lambda a: int(a[:4]) <= int(year), citations)\n newvec[2] = len(citations)\n newlist.append(newvec)\n return newlist", "def getvartypelist(self,subj_,vartype_):\n num_ = None\n if num_ is None:\n num_ = len(subj_)\n elif num_ != len(subj_):\n raise IndexError(\"Inconsistent length of array subj\")\n if subj_ is None:\n raise ValueError(\"Argument subj cannot be None\")\n if subj_ is None:\n raise ValueError(\"Argument subj may not be None\")\n if isinstance(subj_, numpy.ndarray) and subj_.dtype is numpy.dtype(numpy.int32) and subj_.flags.contiguous:\n _subj_copyarray = False\n _subj_tmp = ctypes.cast(subj_.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n elif subj_ is not None:\n _subj_copyarray = True\n _subj_np_tmp = numpy.zeros(len(subj_),numpy.dtype(numpy.int32))\n _subj_np_tmp[:] = subj_\n assert _subj_np_tmp.flags.contiguous\n _subj_tmp = ctypes.cast(_subj_np_tmp.ctypes._as_parameter_,ctypes.POINTER(ctypes.c_int32))\n else:\n _subj_copyarray = False\n _subj_tmp = None\n \n _vartype_minlength = (num_)\n if (num_) > 0 and vartype_ is not None and len(vartype_) != (num_):\n raise ValueError(\"Array argument vartype is not long enough: Is %d, expected %d\" % (len(vartype_),(num_)))\n if isinstance(vartype_,numpy.ndarray) and not vartype_.flags.writeable:\n raise ValueError(\"Argument vartype must be writable\")\n if vartype_ is not None:\n _vartype_tmp = (ctypes.c_int32 * len(vartype_))()\n else:\n _vartype_tmp = None\n res = __library__.MSK_XX_getvartypelist(self.__nativep,num_,_subj_tmp,_vartype_tmp)\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n if vartype_ is not None: vartype_[:] = [ variabletype(v) for v 
in _vartype_tmp[0:len(vartype_)] ]", "def test_metadata_subsets_key_list(self):\n self.assertEqual(type(self.metadata.get('subsets', '')), type([]))", "def subdata(min_,dict_):\n list_ = []\n return [value for value,freq in dict_.items() if freq > min_]", "def get_type_section_data(key_list):\n return np.array(list(map(lambda key: format_keyword(INDEX_SECTIONS_DATA[key].type), key_list)))", "def getvaluelist(doclist, fieldname):\n\tl = []\n\tfor d in doclist:\n\t\tl.append(d.fields[fieldname])\n\treturn l", "def cellAnalysis(celltypelist, fullcsvpaths):\n typelist, paths = [], []\n with open(celltypelist, 'r') as fIn:\n for line in fIn:\n typelist.append(line.strip().split(','))\n with open(fullcsvpaths, 'r') as fIn:\n for line in fIn:\n paths.append(line.strip())\n \n # Create the default dicts\n types = list(set([p[0] for p in typelist]))\n groups = list(set([p[2] for p in typelist]))\n checks = ['maxV', 'maxDerivV', 'maxDerivdV', 'minDerivV',\n 'minDerivdV', 'preMinV', 'postMinV', 'preMaxCurveV',\n 'preMaxCurveK', 'postMaxCurveV', 'postMaxCurveK',\n 'height', 'repolarizationV', 'intervals', 'frequencies']\n props = {typ: {ch: {gr: {} for gr in groups} for ch in checks} for typ in types}\n # Add a few more keys\n for typ in types:\n props[typ]['activity'] = {gr: {} for gr in groups}\n props[typ]['duration'] = {gr: {} for gr in groups}\n \n # Find the matching csv files\n paths = [p for p in paths if p.split('_')[-1]=='clusters.csv'] # If it's a clusters file\n reffils = [f.split('/')[-1].split('_')[0].split('.')[0] for f in paths] # ref to cluster file\n typepaths = []\n #print(\n \n for fil in typelist:\n t_ = fil[1].split('.')[0]\n if t_ in reffils:\n typepaths.append(paths[reffils.index(t_)])\n else:\n typepaths.append('none')\n \n # Populate the dictionary\n fail, success = [], []\n print('%i (of %i) files seem to be present' %(len(typepaths)-typepaths.count('none'),\n len(typepaths)))\n for g in range(len(typepaths)): # This retains the order of typelist\n try:\n df = pd.read_csv(typepaths[g])\n df = df.drop('Unnamed: 33', 1) # Garbage\n df = df.drop('freq', 1) # These are downsampled\n df = df.dropna() # Dropna\n \n # If there are multiple clusters, add them in order\n if max(df.clust_inds) == 1: # Two clusters\n numClusts = int(max(df.clust_inds)+1)\n for ch in checks:\n type_ = typelist[g][0]\n group_ = typelist[g][2]\n cell_ = typelist[g][1].split('.')[0]\n for clust in range(numClusts):\n props[type_][ch][group_][cell_].append(df[df['clust_inds']==clust][ch].dropna().values)\n else: # Just one cluster\n for ch in checks:\n props[type_][ch][group_][cell_] = [df[ch].dropna().values]\n \n # Get activity profile\n tIn, cBouts = timeInClusters(df)\n props[type_]['activity'][group_][cell_] = [tIn, cBouts]\n props[type_]['duration'][group_][cell_] = df.times.iloc[-1]\n success.append(typelist[g])\n \n except:\n fail.append(typelist[g])\n \n #print(failed)\n return props, success, fail", "def crawl_data(self, data_type):\n i = 0\n cat = {}\n prod = {}\n for term in self.search_response:\n if data_type == 'category' and term['products'] > 6000:\n i += 1\n cat[i] = {\"name\": term['name']}\n self.categories.append(cat[i])\n elif data_type == 'product':\n nutrigrade = \"\".join(term[\"nutrition_grades_tags\"])\n if nutrigrade in (\"a\", \"b\", \"c\", \"d\", \"e\"):\n i += 1\n prod[i] = {\"name\": term['product_name_fr'], \"url\": term['url'], \"desc\": term['generic_name_fr'],\n \"brand\": term['brands'], \"categories\": term['categories'], \"store\": term['stores'],\n \"nutriscore\": 
nutrigrade}\n self.products.append(prod[i])\n else:\n pass", "def type_filter(self, items, types=None):", "def _decode_vector(data, sub=False):\n \n main_list = []\n\n if sub: \n # We are decoding a sub-vector, XML is assumed compliant\n tree = data\n else:\n fixed_data = XML_Fix(data)\n tree = ET.fromstring(fixed_data)\n\n for child in tree:\n if 'Value' in child.attrib and child.attrib['Name'] != 'count': # There will never be 'Value' without a 'Name'\n decoded_value = _decode_value(child.attrib['Value'])\n main_list.append(decoded_value)\n elif 'Type' in child.attrib:\n collection_type = child.attrib['Type'] \n if collection_type == 'elsystem.collections.vector':\n sub_list = _decode_vector(data=child, sub=True)\n main_list.append(sub_list)\n elif collection_type == 'elsystem.collections.dictionary':\n sub_dict = _decode_dictionary(child, sub=True)\n main_list.append(sub_dict) \n\n return main_list", "def _filter_search_values(key: str, values: list, collection: list):\n return_data = []\n for item in collection:\n if any(val in values for val in item[key]):\n return_data.append(item)\n return return_data", "def getData(self, value=None, key=\"description\"):\n if value:\n result = None\n for item in self.data:\n target = None\n current = item[key] # it could be either string or a list of string\n if type(value) == list:\n if type(current) == list:\n found = False\n for valueItem in value:\n if valueItem in current:\n found = True\n else:\n found = False\n if found:\n target = item\n else:\n if current in value:\n target = item\n else:\n if type(current) == list:\n if value in current:\n target = item\n else:\n if value == current:\n target = item\n if target:\n if not result:\n result = []\n result.append(target)\n return result\n \n else:\n return self.data", "def get_sub_values(self):\n return list()", "def get_documents(self, value, key='name'):\n documents = []\n for doc in value:\n if doc.endswith('.json'):\n key = 'filename'\n documents.append([x for x in self.vocab if x[key] == doc])\n return documents", "def getTags(self, data_type=\"AOD\", filter_full=False, filter_fast=False):\n if not self.hasDataType(data_type):\n logging.warning(\"Unkown data format %s for sample %s (%d)\" % (data_type, self.name(), self.dsid()))\n return []\n List = []\n for key in self.tags(data_type):\n if not filter_full and key.find(\"_s\") != -1: List.append(key)\n elif not filter_fast and key.find(\"_a\") != -1: List.append(key)\n elif not filter_full and not filter_fast: List.append(key)\n elif key.find(\"_s\") == -1 and key.find(\"_a\") == -1: List.append(key)\n return List", "def process(list_, dict_, keyword):\n if len(list_) == 4:\n name, val, type_, frac_ = list_[0], list_[1], list_[2], list_[3]\n elif list_[0] == 'direc':\n name, val = list_[0], [list_[i] for i in range(len(list_)) if i > 0]\n else:\n name, val = list_[0], list_[1]\n\n if name not in dict_[keyword].keys() and name in ['coeff']:\n dict_[keyword][name] = []\n if keyword in ['TREATED', 'UNTREATED', 'COST'] and 'types' not in dict_[keyword].keys():\n dict_[keyword]['types'] = []\n if keyword in ['TREATED', 'UNTREATED', 'COST']:\n if len(list_) == 4:\n dict_[keyword]['types'] += [[type_, float(frac_)]]\n else:\n dict_[keyword]['types'] += ['nonbinary']\n\n # Type conversion\n if name in ['agents', 'seed', 'maxiter', 'disp']:\n val = int(val)\n elif name in ['source', 'file', 'optimizer', 'start']:\n val = str(val)\n elif name in ['direc']:\n val = list(val)\n else:\n val = float(val)\n if name in ['coeff']:\n dict_[keyword][name] 
+= [val]\n else:\n dict_[keyword][name] = val\n # Finishing.\n return dict_", "def filter_list(data: List[dict], field: str, selected: List[str]):\n if len(selected):\n return [x for x in data if x[field] in selected]\n else:\n return data", "def Filter(self,val):\n \n #set th elength of the lis to 0\n List = [self.InitialList[i] for i in range(0,len(self.InitialList))]\n FilterValues = [None]\n Grab = [None]\n Headers = []\n \n #create the quick index\n for i in range(len(self.Condensensed)):\n \n Headers.append([self.Condensensed[i][l][0] for l in range(len(self.Condensensed[i]))])\n \n #grab the values...\n for j in range(len(self.Variables)):\n \n FilterValues.append(self.Variables[j].get())\n\n if self.Variables[j].get().split(' ')[0] == 'All':\n \n Grab.append(False)\n \n else:\n \n Grab.append(True)\n \n #intermediate list to compare\n ToCompare = []\n \n for i in range(1,len(Grab)):\n \n if Grab[i]:\n \n #find the index\n l = Headers[i].index(FilterValues[i])\n \n #grab it\n ToCompare.append([self.Condensensed[i][l][m] for m in range(len(self.Condensensed[i][l]))])\n\n\n for i in range(0, len(ToCompare)):\n \n List = list(set(List).intersection(ToCompare[i]))\n\n #update the interface\n self.Gatherer(List,list(self.Input))\n self.BuildTree()", "def filter_items(self, filter_data: Dict[str, str] = None) -> List[WalletItem]:\n filtered_items = self.items\n for key, value in filter_data.items():\n if key == \"category\":\n filtered_items = [item for item in filtered_items\n if re.search(value, item.category, re.IGNORECASE)]\n if key == \"account\":\n filtered_items = [item for item in filtered_items\n if re.search(value, item.account, re.IGNORECASE)]\n if key == \"notes\" in filter_data:\n filtered_items = [item for item in filtered_items\n if re.search(value, item.notes, re.IGNORECASE)]\n if key == \"amt_min\":\n value = float(value)\n filtered_items = [item for item in filtered_items if item.amount >= value]\n if key == \"amt_max\":\n value = float(value)\n filtered_items = [item for item in filtered_items if item.amount <= value]\n if key == \"begin_date\":\n try:\n begin_date = datetime.strptime(value, '%d/%m/%Y')\n filtered_items = [item for item in filtered_items if begin_date <= item.date]\n except ValueError as ex:\n print(ex)\n exit(1)\n if key == \"end_date\":\n try:\n end_date = datetime.strptime(value, '%d/%m/%Y')\n filtered_items = [item for item in filtered_items if item.date <= end_date]\n except ValueError as ex:\n print(ex)\n exit(1)\n return filtered_items", "def getlist(self, key, type=None):\n if key not in self:\n return []\n values = super().__getitem__(key)\n if type is not None:\n values = [type(value) for value in values]\n return values", "def cal_values(type, title, pubtime):\n\n result = []\n for article in comparison_data(type, pubtime):\n if not title or not article.title:\n continue\n value = compare_title(title, article.title)\n if value > 0.8:\n result.append(article)\n\n return result", "def document_type(self, key, value):\n _doc_type = self.get(\"document_type\", {})\n\n def doc_type_mapping(val):\n if val:\n return mapping(DOCUMENT_TYPE, val)\n\n for v in force_list(value):\n val_a = doc_type_mapping(clean_val(\"a\", v, str))\n val_b = doc_type_mapping(clean_val(\"b\", v, str))\n\n if not val_a and not val_b and not _doc_type:\n raise UnexpectedValue(subfield=\"a\")\n\n if val_a and val_b and (val_a != val_b != _doc_type):\n raise ManualImportRequired(\n subfield=\"a or b - \" \"inconsistent doc type\"\n )\n if val_a:\n if _doc_type and _doc_type 
!= val_a:\n raise ManualImportRequired(\n subfield=\"a\" \"inconsistent doc type\"\n )\n _doc_type = val_a\n if val_b:\n if _doc_type and _doc_type != val_a:\n raise ManualImportRequired(\n subfield=\"b\" \"inconsistent doc type\"\n )\n _doc_type = val_b\n return _doc_type", "def build_subsets(self, field):\n sss = defaultdict(list)\n for r in self.__elements__:\n sss[getattr(r, field)].append(r)\n return dict(sss)", "def testscfvaluetype(self):\r\n assert isinstance(self.data.scfvalues, list)\r\n assert isinstance(self.data.scfvalues[0], numpy.ndarray)" ]
[ "0.5007208", "0.49767375", "0.49480826", "0.46832088", "0.46792015", "0.4644492", "0.46313435", "0.46113947", "0.45949426", "0.45826903", "0.45621505", "0.45417857", "0.4523038", "0.45103496", "0.4509286", "0.4508049", "0.4495507", "0.4486568", "0.44805893", "0.44530094", "0.4448209", "0.44427168", "0.4440903", "0.4436199", "0.4431072", "0.44263306", "0.43844062", "0.4377243", "0.43525705", "0.43425405" ]
0.82294893
0
Generate appropriate message for STDOUT. This method creates the string to be printed to STDOUT from the items of the `bibfile_data` list argument. It generates either a terse or verbose message based on the state of the `verbose` argument.
def gen_stdout_test_msg(bibfile_data, verbose=False):
    msg_list = [bibfile.test_msg(verbose) for bibfile in bibfile_data]
    msg = "\n".join(msg_list)
    return msg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_verbose(message:str):\n if params['verbose']:\n print(message)\n return", "def print_verbose(args, msg):\n if args.verbose:\n print(msg)", "def print_info(message: str):\n global verbose\n if verbose:\n print(\"%s%s%s\" % (KYEL, message, KNRM))", "def print_warning(verbose, message):\n if verbose:\n print(message)", "def VerboseOut(self, message):\n if self._verbose:\n self.StdErr(message, die=False)", "def verbose_print(msg: str = '') -> None:\n assert isinstance(msg, str)\n if __verbose:\n print(msg)", "def verbose_str(self):\n return self.summary.verbose(self.results) or ''", "def _verbose(self,text):\n if self.verbose:\n print(text)", "def vprint(msg):\n if defaults.verbose:\n print(msg)", "def _output(self, message, verbosity, exact, stream):\n if exact:\n if self.config.verbosity == verbosity:\n stream.write(message + \"\\n\")\n else:\n if self.config.verbosity >= verbosity:\n stream.write(message + \"\\n\")", "def create_output(self, bib):\n for b in bib:\n if isinstance(b, ReferenceErrorInfo):\n self.error_count += 1\n if self.args.comments:\n self.output.append(b.bibtex())\n if not self.args.quiet:\n self.messages.append(str(b))\n else:\n self.output.append(b.bibtex())", "def verbose ( self , message , *args , **kwargs ) :\n return self.logger.verbose ( message , *args , **kwargs )", "def display_detail(msg, *args):\n msg = _concat_message(msg, *args)\n if verbose > 1:\n print \" %s\" % msg.encode(\"UTF-8\")\n sys.stdout.flush()\n if prefs.pref(\"LoggingLevel\") > 0:\n munkilog.log(u\" \" + msg)", "def printmsg(msg, verbose):\n if verbose:\n print(msg)\n\n return None", "def print_help():\n \n print(\"\"\"\n catsub - substitutes every value for each variable in each word of a template file.\n\n Usage:\n\n catsub [--help] [-s] [-u] [-D|-dSTR] [TEMPLATEFILES] [%VARNAME VALUE1 VALUE2 ... ]*\n\n Arguments:\n\n TEMPLATEFILES Name(s) of file(s) containg the template with\n variables of the from %VARNAME; If no file name\n is given, or the name is '-', catsub will read\n from standard input;\n %VARNAME Variable name to substitute;\n VALUE1 VALUE2 ... Values to substitute for the variable;\n -s Print statistics to stderr on resolved and unresolved variables.\n -u Escaped percentage in template are returned unescaped;\n -dSTR Use STR to divide multiple substituted values\n -D Use newline to divide multiple substituted values\n --help Show this help page.\n\n Notes: \n\n - The names of the template files may not start with a percent sign. \n\n - All variables must start with a percent sign and cannot contain\n whitespace.\n\n - Substituted values cannot start with a percent sign.\n\n - Substitution happens only once per variable, i.e., substituted\n values do not undergo subsequent substitutions.\n\n - When substituting several values, those values are separated by a\n space, by a newline if the -D argument was given, or by STR if\n the -dSTR argument was given.\n\n - When a variable has been given several values to substitute and\n the variable occurs in a substring of a word in the template,\n that word get repeated. E.g. \"echo un%X | catsub %X kind tidy\"\n gives \"unkind untidy\"\n\n - Substitution happens combinatorically within a word. E.g. a word\n \"%X,%Y\" in the template, when processed with \"catsub %X a b %Y c d\"\n becomes \"a,c a,d b,c b,d\". Combinatorics can be circumvented by\n quoting the replacement values, i.e. \"catsub %X 'a b' %Y 'c d'\"\n gives \"a b,c d\".\n\n - Substitution uses the longest possible variable name. E.g. 
in\n \"%HELLOWORLD\", both %HELLO and %HELLOWORLD could be substituted\n if values for both are specified on the catsub command, but it is\n the longer %HELLOWORLD that gets used.\n \n - Percentage signs in the template can escape substitution by\n prepeding them with a slash, i.e., '\\%'. Every '\\%' in the\n template will be remain a '\\%' unless the -u argument is used in\n the catsub command, in which case, they are replaced by '%'.\n\n - The template cannot use the unicode character '%'.\n\n Examples:\n\n $ echo %HELLO %UNIVERSE | catsub %HELLO Hi %UNIVERSE world\n Hi world\n\n $ echo %HELLO %UNIVERSE > example.tmpl\n $ catsub example.tmpl %HELLO Greetings %UNIVERSE universe!\n Greetings universe!\n\"\"\")", "def report(message):\n if _verbose:\n print message", "def _print(self, msg, msg_args):\r\n # XXX: Not using the logger framework: need to\r\n # learn to use logger better.\r\n if not self.verbose:\r\n return\r\n if self.verbose < 50:\r\n writer = sys.stderr.write\r\n else:\r\n writer = sys.stdout.write\r\n msg = msg % msg_args\r\n writer('[%s]: %s\\n' % (self, msg))", "def __str__(self):\n\n styled = partial(prettyformat, indent=4, compact=True)\n text = (\n \"<xbout.BoutDataset>\\n\"\n + \"Contains:\\n{}\\n\".format(str(self.data))\n + \"Metadata:\\n{}\\n\".format(styled(self.metadata))\n )\n if self.options:\n text += \"Options:\\n{}\".format(self.options)\n return text", "def generate_strings():\n\n # used by error pages and in the sidebar for why to create a subverbify\n for category, strings in funny_translatable_strings.iteritems():\n for string in strings:\n print \"# TRANSLATORS: Do not translate literally. Come up with a funny/relevant phrase (see the English version for ideas.) Accepts markdown formatting.\"\n print \"print _('\" + string + \"')\"\n\n # these are used in v1.lib.pages.trafficpages\n INTERVALS = (\"hour\", \"day\", \"month\")\n TYPES = (\"uniques\", \"pageviews\", \"traffic\", \"impressions\", \"clicks\")\n for interval in INTERVALS:\n for type in TYPES:\n print \"print _('%s by %s')\" % (type, interval)", "def __str__(self):\n if self.filename:\n filename = self.filename\n else:\n filename = 'Unknown'\n if self.endian == '<':\n endian = 'Little Endian'\n else:\n endian = 'Big Endian'\n ret_val = ('FILE: %s\\nRecord Offset: %i byte\\n' +\n 'Header Endianness: %s\\n\\n') % \\\n (filename, self.record_offset, endian)\n ret_val += 'FIXED SECTION OF DATA HEADER\\n'\n for key in self.fixed_header.keys():\n ret_val += '\\t%s: %s\\n' % (key, self.fixed_header[key])\n ret_val += '\\nBLOCKETTES\\n'\n for key in self.blockettes.keys():\n ret_val += '\\t%i:' % key\n if not len(self.blockettes[key]):\n ret_val += '\\tNOT YET IMPLEMENTED\\n'\n for _i, blkt_key in enumerate(self.blockettes[key].keys()):\n if _i == 0:\n tabs = '\\t'\n else:\n tabs = '\\t\\t'\n ret_val += '%s%s: %s\\n' % (tabs, blkt_key,\n self.blockettes[key][blkt_key])\n ret_val += '\\nCALCULATED VALUES\\n'\n ret_val += '\\tCorrected Starttime: %s\\n' % self.corrected_starttime\n return ret_val", "def _get_message_body(self, template_file, message_data):\r\n\r\n msg = \"\"\"\r\nThe import for user {username} has failed to import. 
The path to the import\r\nis:\r\n\r\n{file_path}\r\n\r\nError:\r\n\r\n{exc}\r\n\r\n\"\"\".format(**message_data)\r\n return msg", "def msg(txt, *args):\n if QUIET:\n return\n if args:\n txt = txt % args\n sys.stderr.write(txt + '\\n')", "def outputText(self, item, titleMode, internal=False):\n if self.useFileInfo:\n item = globalref.docRef.fileInfoItem\n storedText = item.data.get(self.name, '')\n if storedText:\n return self.formatOutput(storedText, titleMode, internal)\n return ''", "def __str__(self):\n\n styled = partial(prettyformat, indent=4, compact=True)\n text = \"<xbout.BoutDataset>\\n\" + \\\n \"Contains:\\n{}\\n\".format(str(self.data)) + \\\n \"Metadata:\\n{}\\n\".format(styled(self.metadata))\n if self.options:\n text += \"Options:\\n{}\".format(styled(self.options))\n return text", "def _getDiagnosticString():\n text = '\\n## Diagnostic output from minimalmodbus ## \\n\\n'\n text += 'Minimalmodbus version: ' + __version__ + '\\n'\n text += 'Minimalmodbus status: ' + __status__ + '\\n'\n text += 'Revision: ' + __revision__ + '\\n'\n text += 'Revision date: ' + __date__ + '\\n'\n text += 'File name (with relative path): ' + __file__ + '\\n'\n text += 'Full file path: ' + os.path.abspath(__file__) + '\\n\\n'\n text += 'pySerial version: ' + serial.VERSION + '\\n'\n text += 'pySerial full file path: ' + os.path.abspath(serial.__file__) + '\\n\\n'\n text += 'Platform: ' + sys.platform + '\\n'\n text += 'Filesystem encoding: ' + repr(sys.getfilesystemencoding()) + '\\n'\n text += 'Byteorder: ' + sys.byteorder + '\\n'\n text += 'Python version: ' + sys.version + '\\n'\n text += 'Python version info: ' + repr(sys.version_info) + '\\n'\n text += 'Python flags: ' + repr(sys.flags) + '\\n'\n text += 'Python argv: ' + repr(sys.argv) + '\\n'\n text += 'Python prefix: ' + repr(sys.prefix) + '\\n'\n text += 'Python exec prefix: ' + repr(sys.exec_prefix) + '\\n'\n text += 'Python executable: ' + repr(sys.executable) + '\\n'\n try:\n text += 'Long info: ' + repr(sys.long_info) + '\\n'\n except:\n text += 'Long info: (none)\\n' # For Python3 compatibility\n try:\n text += 'Float repr style: ' + repr(sys.float_repr_style) + '\\n\\n'\n except:\n text += 'Float repr style: (none) \\n\\n' # For Python 2.6 compatibility\n text += 'Variable __name__: ' + __name__ + '\\n'\n text += 'Current directory: ' + os.getcwd() + '\\n\\n'\n text += 'Python path: \\n'\n text += '\\n'.join(sys.path) + '\\n'\n text += '\\n## End of diagnostic output ## \\n'\n return text", "def _get_message_body(self, template_file, message_data):\r\n\r\n msg = \"\"\"\r\nYour import has failed. The error is listed below. Please file a bug at\r\nhttps://github.com/bookieio/bookie/issues if this error continues. You may\r\nalso join #bookie on freenode irc if you wish to aid in debugging the issue.\r\nIf the error pertains to a specific bookmark in your import file you might try\r\nremoving it and importing the file again.\r\n\r\nError\r\n----------\r\n\r\n{exc}\r\n\r\nA copy of this error has been logged and will be looked at.\r\n\r\n---\r\nThe Bookie Team\"\"\".format(**message_data)\r\n return msg", "def _get_message_body(self, template_file, message_data):\r\n\r\n msg = \"\"\"\r\nYour bookmark import is complete! We've begun processing your bookmarks to\r\nload their page contents and fulltext index them. This process might take a\r\nwhile if you have a large number of bookmarks. 
Check out your imported\r\nbookmarks at https://bmark.us/{username}/recent.\r\n\r\n---\r\nThe Bookie Team\"\"\".format(**message_data)\r\n return msg", "def message(self, data, newline=\"\\n\"):\n # Are we logging to screen, file or both?\n if not self.quiet:\n print(data)\n\n if self.log_fo:\n self.log_fo.write(data + newline)\n self.log_fo.flush()", "def verbose_print(text,verbose_level):\n if Args.verbose >= verbose_level:\n print '\\t' * (verbose_level-1) + text", "def verbose(ctx, msg, *args):\n if ctx.verbose:\n info(msg, *args)" ]
[ "0.55090094", "0.5451742", "0.5423902", "0.5419448", "0.5378487", "0.53770965", "0.5367469", "0.53551465", "0.53439623", "0.53236914", "0.53030246", "0.52971905", "0.52728134", "0.52330023", "0.5109582", "0.5108932", "0.5082648", "0.50546724", "0.5050566", "0.50337845", "0.5010708", "0.5009794", "0.50028986", "0.49980956", "0.49973607", "0.4979287", "0.49777344", "0.49740297", "0.49641642", "0.49345115" ]
0.7657903
0
This function determines if the user input is a valid player. If input is 'Q', exits program.
def is_valid_player(user_input):
    i = user_input.upper()
    if i in Board.player_decoder:
        return True
    elif i == 'Q':
        exit("\nExiting program. Thanks for using Clue Detective!\n")
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def end_input(self):\n inp = input()\n if inp.upper() == \"Q\":\n return False\n if inp == \"\" \\\n \"\":\n return True\n return self.end_input", "def validate_input(user_input: str) -> bool:\n\n if not user_input.islower():\n return False\n\n if user_input.endswith(\"yeet\"):\n return False\n \n if \"q\" or \"Q\" in user_input: # Check if q is a letter\n return False\n \n return True # If none of the conditions above are met", "def get_play_state():\r\n\toption = input('Choose P/p to Play, or Q/q to Quit: ').lower()\r\n\tif option == 'q':\r\n\t\treturn False\r\n\telif option == 'p':\r\n\t\treturn True\r\n\r\n\tprint('Invalid entry. Try again.')\r\n\r\n\treturn get_play_state() # Run function again until valid user input\r", "def validate_user_input(user_input):\n responses = ['t', 'r', 'q']\n return user_input in responses", "def player_input():\n x_o = ['X', 'O']\n player = \"\"\n while True:\n player = input('Choose your player X or O: ')\n if player.upper() in x_o:\n break\n else:\n print('It is neither X nor O! Choose again:')\n player = player.upper()\n print(f\"Your player is {player}\")\n return player", "def queryNewGame(self):\n print\n response = raw_input('Would you like to play again? ')\n return response.lower() in ('y', 'yes')", "def valid_input(player_num):\n player_input = input(\"Player \"+str(player_num)+ \" enter r to roll the die: \")\n player_input = player_input.lower()\n \n while player_input != \"r\":\n print(\"Invalid input\")\n player_input = input(\"Player \"+str(player_num)+\" enter r to roll the die: \")\n player_input = player_input.lower()", "def checkInput(userInput):\n if userInput == 'exit':\n return 0\n return 1", "def continue_playing():\n while True:\n print(\"\\nDo you want to continue? y/n\")\n choice = input().lower()\n if choice == 'y':\n return True\n if choice == 'n':\n return False\n print(\"Incorrect entry\")", "def is_valid(user_input, card_type=None, skip=False):\n \n i = user_input.upper()\n if i == 'Q':\n exit(\"\\nExiting program. 
Thanks for using Clue Detective!\\n\")\n if skip:\n if i == 'X':\n return True\n if card_type:\n key_list = [key for key in Board.input_decoder \n if Board.input_decoder[key].type == card_type]\n if i in key_list:\n return True\n elif not card_type:\n if i in Board.input_decoder:\n return True \n else:\n return False", "def valid_response(prompt, *valid):\r\n ans = console_input(prompt).lower()\r\n\r\n if ans in valid:\r\n return True\r\n elif ans == '':\r\n return None\r\n\r\n return False", "def quit():\n while True:\n try:\n choice = input('press q to quit \\n r to restart')\n choice = choice.lower() # sanitize inputs before comparision\n\n except TypeError:\n print('Please enter q to quit or r to restart')\n if choice not in ('q', 'r'):\n continue\n else:\n break\n if choice == 'q':\n return True\n elif choice == 'r':\n return False", "def prompt_user_check_input(self):\r\n user_input = 0\r\n # grabs user input and changes it to an int\r\n while True:\r\n try:\r\n user_input = int(\r\n input(\"\\033[1;33mMake your move by entering the number of an open space on the board: \\033[0m\"))\r\n except ValueError:\r\n print(\"Why do you refuse to enter a number, Dave?\")\r\n continue\r\n else:\r\n break\r\n\r\n # makes sure the user enters a number 0-8 and verifies that the space the user selected is open\r\n if self.verify_valid_num(user_input) and self.write_user_choice(user_input):\r\n return True\r\n else:\r\n self.prompt_user_check_input()", "def getQuestion():\n\n tcflush(sys.stdin, TCIFLUSH)\n question = input(\" You say:\\n \")\n\n return validQuestion(question)", "def guess_input(self):\n try:\n self.player_guess = input('Guess a letter: ').lower()\n Character(self.player_guess, self.selected_phrase)\n except ValueError:\n print(\"That was not a valid input. Please pick a number between 1 and 10\")\n if self.player_guess == \"\":\n print (\"Please enter a letter,try again.\")\n if not self.player_guess.isalpha():\n print (\"Please only enter a letter(a-z),try again.\")\n if len(self.player_guess) > 1:\n print(\"Please enter only one letter at a time.\")", "def player_choice(board):\n position = -1\n while True:\n try:\n position = int(input(\"Choose your position: \"))\n\n if 0 < position <= 9:\n is_empty_position = space_check(board, position)\n if is_empty_position:\n break\n else:\n print('Position is not empty, choose again!')\n continue\n except ValueError:\n print('Invalid position, choose again!')\n return position", "def play_again():\n valid_answer = False\n while not valid_answer:\n response = input(\"Would you like to play again? \")\n valid_answer = check_inputs([\"Y\", \"N\"], response[0].capitalize())\n return response[0].capitalize() == \"Y\"", "def __prompt_name(self):\n self.clear_screen()\n self.__print_logo()\n\n name = input(\"[!] Enter new player name and press ENTER:\\n\\n \")\n if not (2 < len(name) < 16):\n self.clear_screen()\n self.__print_logo()\n print(\"Username must be between 3 and 15 characters.\")\n input(\"Press ENTER to return to player menu.\")\n elif name in self._roster.get_roster():\n self.clear_screen()\n self.__print_logo()\n print(\"Player already exists.\")\n input(\"Press ENTER to return to player menu.\")\n else:\n return name", "def ask_for_query():\n print('Enter query, empty to quit:')\n try:\n query = input('? ')\n except EOFError:\n # User has cancelled\n return False\n\n return query", "def player_choice(text):\n try:\n action_choice = input(text)\n return action_choice.lower()\n except NameError:\n print(\"Invalid input. 
Please try again.\")", "def y_n_prompt():\n start = input(\"Would you like to play a game? Y/N: \").upper()\n while True:\n try:\n if start != \"Y\":\n if start != \"N\":\n raise ValueError\n except ValueError:\n print(\"\\n\")\n print(\"Invalid character, Please try again\")\n y_n_prompt()\n else:\n if start.upper() == \"Y\":\n play_game()\n elif start.upper() == \"N\":\n print(\"\\n\")\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"Find me on GitHub, TechCentreUK\")\n print(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\")\n print(\"\\n\")\n exit()", "def replay(self):\r\n try:\r\n user_input = input('Would you like to play again? (Y/N) > ')\r\n if user_input.lower() not in ['y', 'n']:\r\n raise ValueError\r\n except ValueError:\r\n print(YELLOW + 'Please enter in \\'Y\\' or \\'N\\'.\\n' + END)\r\n self.replay()\r\n else:\r\n if user_input.lower() == 'y':\r\n return Game().main()\r\n else:\r\n print(YELLOW + '\\nThanks for playing! See you '\r\n 'next time!\\n' + END)\r\n sys.exit()", "def ready():\n rdy = False\n valid_answer = False\n while not rdy:\n while not valid_answer:\n response = input(\"Are you ready to play? \")\n valid_answer = check_inputs([\"Y\", \"N\"], response[0].capitalize())\n rdy = response[0].capitalize() == \"Y\"\n valid_answer = False", "def getAnswer(inp : str = \"\", num : int = None):\n\n\tanswer = input(inp + \" Y[es] or N[o]: \")\n\tanswer = answer.replace(\" \", \"\").lower() #make the string lowercase and without white spaces\n\tif not answer or not answer.isalpha(): \n\t\t#if user input only 'Enter' or input not contains only alphabetic symbols\n\t\tif not num is None:\n\t\t\tanswer = input(f\"You must be Enter 'Y[es]' if your number is {num} or 'N[o]' otherwise: \")\n\t\telse:\n\t\t\tanswer = input(\"You must be Enter 'Y[es]' if you want to play or 'N[o]' if you don't want to play: \")\n\n\telif answer not in (\"n\", \"not\", \"no\", \"y\", \"yes\", \"yeah\", \"yed\"):\n\t\tanswer = input(\"I don't understand. Please Enter your answer angain (Y[es] or N[o]): \")\n\n\tif answer in (\"n\", \"not\", \"no\"):\n\t\treturn False\n\telif answer in (\"y\", \"yes\", \"yeah\", \"yed\", \"ues\", \"ies\", \"ied\", \"oes\"):\n\t\treturn True", "def get_player():\n os.system('clear')\n print(\"Are you a returning player?\\n[y/n]\\n\")\n new = input('>')\n print()\n if new.lower() == 'n':\n user = new_player()\n elif new.lower() == 'y':\n user = load_player()\n else:\n print(\"Please enter 'y' or 'n'\")\n return get_player()\n return user", "def ask_for_cave():\n player_input = input(\"Which cave? \")\n if (player_input.isdigit() and\n int(player_input) in caves[player_location]):\n return int(player_input)\n else:\n print(player_input + \"?\")\n print(\"That's not a direction that I can see!\")\n return False", "def prompt(question):\n print('\\n')\n while True:\n reply = str(input(question+' (y/n): ')).lower().strip()\n if reply[:1] == 'y':\n return True\n if reply[:1] == 'n':\n return False", "def play():\n while True:\n user = input(\"Play Again? (Y/N) \").upper()\n if user == 'Y':\n main()\n else:\n print('Hope to see you again')\n sys.exit()", "def valid_input(choices):\n while True:\n print_pause(\"\\nYou may:\")\n for choice in choices:\n print_pause(choice)\n valid_input = input(\"\\nWhat would you like to do?\\n\")\n if valid_input == \"inventory\":\n print_pause(\"You are currently carrying:\")\n for item in inventory:\n print_pause(item)\n elif valid_input == \"quit\":\n confirm = input(\"Are you sure you want to quit? 
\")\n if confirm == \"yes\" or confirm == \"y\":\n print(\"Thanks for playing!\\n\")\n sys.exit()\n else:\n for key in choices.keys():\n if valid_input.lower() in choices[key]:\n key = key.replace('\\033[1;32m', '').replace('\\x1b[0m', '')\n# print(f\"returning: {key}\")\n return key\n print_pause(\"I'm sorry - I don't understand that. Please select\"\n \" one of the following choices.\")", "def ask(question):\n while True:\n query = input('{}\\n Reply (y/n) >>'.format(question))\n res = query[0].lower()\n if query == '' or not res in ['y', 'n']:\n pass\n else:\n break\n\n if res == 'y':\n return True\n else:\n return False" ]
[ "0.7061746", "0.7001091", "0.6993307", "0.685947", "0.679399", "0.6632149", "0.6587232", "0.6581674", "0.6562173", "0.65193605", "0.65072095", "0.6485321", "0.6314308", "0.6312182", "0.6304966", "0.63033706", "0.62971884", "0.62611985", "0.62489146", "0.6245538", "0.6236307", "0.6187586", "0.6186857", "0.61748827", "0.6173944", "0.6163096", "0.61538", "0.6152731", "0.61488134", "0.6140464" ]
0.8541527
0
This function determines if the user input is a valid card. If skip = True, also allows 'X' as a valid input. If input is 'Q', exits program.
def is_valid(user_input, card_type=None, skip=False):
    i = user_input.upper()
    if i == 'Q':
        exit("\nExiting program. Thanks for using Clue Detective!\n")
    if skip:
        if i == 'X':
            return True
    if card_type:
        key_list = [key for key in Board.input_decoder
                    if Board.input_decoder[key].type == card_type]
        if i in key_list:
            return True
    elif not card_type:
        if i in Board.input_decoder:
            return True
    else:
        return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_valid_player(user_input):\n \n i = user_input.upper()\n if i in Board.player_decoder:\n return True\n elif i == 'Q':\n exit(\"\\nExiting program. Thanks for using Clue Detective!\\n\")\n else:\n return False", "def player_discard(self, inpt):\n \n if inpt.isdigit() == False:\n return 0\n if int(inpt) > len(self.player_hand):\n print(\"\\nNumber of card entered is greater than number of cards\")\n print(\"Please try again \\n\")\n return 0\n if self.player_hand[int(inpt)-1][1] == '8':\n self.discard_pile = self.player_hand.pop(int(inpt)-1)\n self.new_suit = ''\n while self.new_suit not in ['h','d','s','c']:\n self.new_suit = input(\"Please enter new suit: h, d, s, c\\n\")\n print(\"\\nNew suit is: \", self.new_suit)\n return 1\n if self.new_suit != '':\n if self.player_hand[int(inpt)-1][0] == self.new_suit:\n self.discard_pile = self.player_hand.pop(int(inpt)-1)\n self.new_suit = ''\n return 1\n else:\n print(\"\\nYou need to match new suit\")\n print(\"Please try again\\n\")\n return 0\n if self.new_suit == '':\n if self.player_hand[int(inpt)-1][0] == self.discard_pile[0] or \\\n self.player_hand[int(inpt)-1][1] == self.discard_pile[1]:\n self.discard_pile = self.player_hand.pop(int(inpt)-1)\n return 1\n else:\n print(\"\\nYou need to match discard pile card suit or rank\")\n print(\"Please try again\\n\")\n return 0", "def _is_user_wants_to_continue(self):\n\n # dummy value to get in while\n user_input = -1\n while user_input != 1 and user_input != 2:\n\n try:\n # convert the string into int\n user_input = int(input())\n except ValueError:\n print(\"Please enter a number\")\n continue\n except Exception as e:\n print(\"something went wrong please try again \" + str(e))\n continue\n\n # check if the user_input was one of the options\n # if not present a error massage and try again\n if user_input != 1 and user_input != 2:\n print(\"Please enter a valid number(1-2)\")\n continue\n\n return user_input == 1", "def rawInputWithCheck(prompt):\n proceed = False\n i = None\n while not(proceed):\n i = raw_input(prompt)\n print \"Is this correct?\"\n print ' '*3, repr(i)\n proceed = YNInput(' '*2)\n return i", "def continue_playing():\n while True:\n print(\"\\nDo you want to continue? y/n\")\n choice = input().lower()\n if choice == 'y':\n return True\n if choice == 'n':\n return False\n print(\"Incorrect entry\")", "def validate_user_input(user_input):\n responses = ['t', 'r', 'q']\n return user_input in responses", "def card_type():\n while True: #Run until a suitable input is passed.\n question = input(\"Savings(S) or Current(C) >>> \")\n if question == \"S\": #if savings account\n return \"savings\"\n elif question == \"C\": #if current account\n return \"current\"", "def play_again():\n valid_answer = False\n while not valid_answer:\n response = input(\"Would you like to play again? 
\")\n valid_answer = check_inputs([\"Y\", \"N\"], response[0].capitalize())\n return response[0].capitalize() == \"Y\"", "def check_input(saved_input):\n if saved_input.lower() == \"!yes\":\n return True\n if saved_input.lower() == \"!no\":\n return False", "def end_input(self):\n inp = input()\n if inp.upper() == \"Q\":\n return False\n if inp == \"\" \\\n \"\":\n return True\n return self.end_input", "def dar_carta(shuffle_cards):\n\n print OTRA_CARTA\n r = raw_input(\">> \")\n\n if (r == \"y\"):\n return shuffle_cards.pop()\n else:\n return False", "def input_validation(self, prompt):\r\n\r\n while True:\r\n try:\r\n x, y = map(int, input(prompt).split())\r\n except ValueError: # when there is less than or more than 2 input values\r\n print('Invalid input try again.')\r\n continue\r\n if (x != self.selected[0]) or (y != self.selected[1]): # different from first choice\r\n if (0 <= x <= 3) and (0 <= y <= 12): # Valid input\r\n if not ([x, y] in self.bin): # Check if this card is still there or not\r\n break\r\n else:\r\n print('This card has already been taken.')\r\n continue\r\n else: # invalid input\r\n print('Row and column should be from 0 to 3 and 1 to 12 respectively.')\r\n continue\r\n else:\r\n print('Choose a card different from your first choice')\r\n continue\r\n return x, y", "def user_input():\n ans = input('Continue? : y/n ')\n if ans == 'n':\n return False\n else:\n return True", "def validate_input(user_input: str) -> bool:\n\n if not user_input.islower():\n return False\n\n if user_input.endswith(\"yeet\"):\n return False\n \n if \"q\" or \"Q\" in user_input: # Check if q is a letter\n return False\n \n return True # If none of the conditions above are met", "def validate_card():\r\n print(\"Please insert your card\")\r\n card = int(input(\"Please enter 1 if you entered your card\"))\r\n return card", "def blackjack():\n start_game = input('Would you like to play blackjack? Type \"y\" or \"n\": ').lower()\n if start_game == 'y':\n deal_cards()\n elif start_game == 'n':\n print('Maybe next time!')\n exit()\n else:\n print('Invalid selection. Please try again')\n blackjack()", "def should_continue():\n choice = input(\"Again (Y/N)? \").upper()\n while not choice or choice[0] not in ['Y', 'N']:\n choice = input(\"Please enter either 'Y' or 'N'. Again (Y/N)? \").upper()\n return choice[0] == 'Y'", "def play_again():\n while True:\n again = raw_input(\"Care to play again (yes/no)? \")\n if again in [\"yes\", \"no\"]:\n return False if again == \"no\" else True\n print \"Not a valid response. Type 'yes' or 'no': \"", "def ask_allow_purchase() -> bool:\n allow_purchase_str: str = ask_user_input(\"\\t\\t\\tAllow purchase: [Y/n] \")\n return allow_purchase_str.lower() == \"y\" or allow_purchase_str == \"\"", "def play_again(user, session_cards, deck):\n print(f\"{user.name},\\n\\tWould you like to play again?\")\n print(\"Enter 'y' or 'n'\\n\")\n if input('>')[0].lower() != 'n':\n print()\n play_memory(user, session_cards, deck)", "def play_again(self):\n while True:\n again = raw_input(\"Care to play again (yes/no)? \")\n if again in [\"yes\", \"no\"]:\n return False if again == \"no\" else True\n print \"Not a valid response. Type 'yes' or 'no': \"", "def validate_action(self, message=\"This action may delete data from the database. This action cannot be undone.\\nDo you wish to continue? (Y/N): \"):\n \n while True:\n print('\\n\\n')\n inp = input(message)\n \n if (inp.upper() == 'Y'):\n return True\n elif (inp.upper() == 'N'):\n return False\n \n print(\"Invalid input. 
Try again\")", "def followUpAttack(self, validCards):\n print(\"Select card from... \")\n cardManager.printHand(validCards)\n card = int(input(\"to your attack: \"))\n while card not in validCards: # error checking\n print(card)\n print(\"Please select a valid card from...\")\n cardManager.printHand(validCards)\n card = int(input(\"to your attack: \"))\n self.currentHand.remove(card)\n card = self.checkDoubles(card)\n return card", "def yes_or_no(question):\n while True:\n ques = input(question)\n if ques.lower().startswith('y'):\n return True\n elif ques.lower().startswith('n'):\n return False\n else:\n print('Y/yes or N/no? ')", "def prompt_user_check_input(self):\r\n user_input = 0\r\n # grabs user input and changes it to an int\r\n while True:\r\n try:\r\n user_input = int(\r\n input(\"\\033[1;33mMake your move by entering the number of an open space on the board: \\033[0m\"))\r\n except ValueError:\r\n print(\"Why do you refuse to enter a number, Dave?\")\r\n continue\r\n else:\r\n break\r\n\r\n # makes sure the user enters a number 0-8 and verifies that the space the user selected is open\r\n if self.verify_valid_num(user_input) and self.write_user_choice(user_input):\r\n return True\r\n else:\r\n self.prompt_user_check_input()", "def check_continue(config: SimpleNamespace, prev: str=None, next: str=None) -> None:\n if config.general.debug_mode:\n if prev and next:\n print(f'\\n{prev.upper()} phase completed. Next up: {next.upper()} phase.')\n x = input('\\nDo you want to continue y/n? ')\n if x not in ['yes', 'y', '']:\n print()\n sys.exit(0)", "def card(phenny, input):\n if not input.group(2):\n phenny.say(input.nick + 'Perhaps you meant \".card Storm Crow\"?')\n else:\n card_name = input.group(2).strip().lower().title()\n if card_name in nick.nicknames:\n card_name = nick.nicknames[card_name]\n card_text = get_card(card_name)\n if card_text:\n phenny.reply(card_text)\n else:\n phenny.reply(\"I could not find a card by that name.\")", "def play_again():\n decision = \" \"\n while not(decision[0] == \"y\") and not(decision[0] == \"n\"):\n decision = input(\"Would you like to play again? \").lower()\n if decision[0]==\"y\":\n return True\n else:\n return False", "def play_pass(which_side):\n\n if which_side == 'opp':\n \n bad_input = True\n while bad_input:\n play_pass = input('Would you like to play the event card or pass it? (play/pass)')\n if play_pass.lower() != 'play' and play_pass.lower() != 'pass':\n print('type in play or pass')\n else:\n bad_input = False\n return play_pass.lower()\n \n else:\n \n return 'play'", "def request_play_again():\n print('Do you want to play again? (yes or no)')\n return input().lower().startswith('y')" ]
[ "0.6342533", "0.59396064", "0.58468616", "0.58279556", "0.5803147", "0.5801814", "0.5795636", "0.5759419", "0.57581276", "0.5757214", "0.57171595", "0.5706514", "0.5694613", "0.5655004", "0.5642363", "0.5630517", "0.5617309", "0.5616001", "0.55874807", "0.55715656", "0.5568929", "0.55608374", "0.5540261", "0.5538218", "0.5521189", "0.5511672", "0.5505089", "0.55014306", "0.54929316", "0.54897374" ]
0.8009378
0
This function collects a list user inputs for players and suspects and decodes them.
def collect_players_and_suspects_list(): players_list = [] while (players_input := input("Enter player: ")) != '#': i = players_input.upper() if not is_valid_player(i): print("Please enter a valid Suspect.") continue if i not in players_list: players_list.append(i) players_decoded = [Board.identify(player) for player in players_list] suspects_decoded = [Board.translate(player) for player in players_list] return players_decoded, suspects_decoded
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collect_players_list():\n \n players_list = []\n while (players_input := input(\"Enter player: \")) != '#':\n i = players_input.upper()\n if not is_valid_player(i):\n print(\"Please enter a valid Suspect.\")\n continue\n if i not in players_list:\n players_list.append(i)\n players_decoded = [Board.identify(player) for player in players_list]\n suspects_decoded = [Board.translate(player) for player in players_list]\n return players_decoded", "def collect_cards():\n \n cards_list = []\n while (cards_input := input(\"Enter card: \")) != '#':\n i = cards_input.upper()\n if not is_valid(i):\n print(f\"Please enter a valid card.\")\n continue\n cards_list.append(i)\n cards_decoded = [Board.translate(card) for card in cards_list]\n return cards_decoded", "def process_input(data):\n if data == \"NAME\":\n return get_name()\n \n elif data == \"SHIP PLACEMENT\":\n return get_ship_placements()\n \n elif data == \"SHOT LOCATION\":\n return get_shot_location()\n \n elif data == \"MISS\":\n MY_SHOTS[LAST_SHOT[0]][LAST_SHOT[1]] = data\n \n elif data == \"HIT\":\n MY_SHOTS[LAST_SHOT[0]][LAST_SHOT[1]] = data\n \n elif data[:4] == \"SUNK\":\n MY_SHOTS[LAST_SHOT[0]][LAST_SHOT[1]] = data\n \n elif data[:13] == \"OPPONENT SHOT\":\n tokens = data[14:].split(\",\")\n OPPONENT_SHOTS.append((int(tokens[0]), int(tokens[1]), tokens[2]))\n \n elif data == \"WIN\":\n return False\n \n elif data == \"LOSE\":\n return False\n \n elif data == \"ERROR\":\n return False\n \n return None", "def vsepr_parse_user_answer(user_input):\r\n return json.loads(user_input)", "def get_user_list(question):\n return [int(s) for s in input(question).split()]", "def get_data_from_user(questions, answers_types, id_storage, id_, is_alpha):\n user_data = []\n\n for i in range(len(questions)):\n user_input = None\n\n while type(user_input) != answers_types[i]:\n user_input = ui.get_inputs([questions[i]], '')[0]\n user_input = get_correct_data_types(user_input, answers_types[i], is_alpha[i])\n\n # Other differences while asking for data here\n\n user_data.append(user_input)\n\n user_data = [str(record) for record in user_data]\n\n return user_data", "def test__parse_prompts():\n prompt = OnboardingPrompt(name = 'ibuki')\n \n for input_value, expected_output in (\n ({}, None),\n ({'prompts': None}, None),\n ({'prompts': [prompt.to_data()]}, (prompt, )),\n ):\n output = parse_prompts(input_value)\n vampytest.assert_eq(output, expected_output)", "def sanitize_user_input(self, user_input: Sequence[str]) -> Set[str]:\n sanitized_names = set()\n for name in user_input:\n providers_for_name = self.provided_by(name)\n if not providers_for_name:\n aurman_error(\"No providers for {} found.\".format(Colors.BOLD(Colors.LIGHT_MAGENTA(name))))\n raise InvalidInput(\"No providers for {} found.\".format(Colors.BOLD(Colors.LIGHT_MAGENTA(name))))\n elif len(providers_for_name) == 1:\n sanitized_names.add(providers_for_name[0].name)\n # more than one provider\n else:\n dep_providers_names = [package.name for package in providers_for_name]\n dep_name = strip_versioning_from_name(name)\n\n # name matches one of the providers names\n if dep_name in dep_providers_names:\n sanitized_names.add(dep_name)\n else:\n aurman_note(\n \"We found multiple providers for {}\\nChoose one by entering the corresponding number.\".format(\n Colors.BOLD(Colors.LIGHT_MAGENTA(name))\n )\n )\n\n while True:\n for i in range(0, len(providers_for_name)):\n print(\n \"Number {}: {}\".format(i + 1, self.repo_of_package(providers_for_name[i].name))\n )\n\n try:\n user_input = 
int(input(aurman_question(\"Enter the number: \", False, False)))\n if 1 <= user_input <= len(providers_for_name):\n sanitized_names.add(providers_for_name[user_input - 1].name)\n break\n except ValueError:\n print(aurman_error(\"That was not a valid choice!\", False, False))\n else:\n print(aurman_error(\"That was not a valid choice!\", False, False))\n\n return sanitized_names", "def get_user_inputs():\n print('Enter the path to the loan applications file, path to the output file, N (the starting capital), K (the max number of concurrently active loans)')\n print('For example: applications.json approved.txt 50000 1000')\n user_input = raw_input()\n return user_input.split()", "def parse_input(question_ids, answer_ids):\r\n input_ids = list()\r\n input_ids.append(BERT_CLS)\r\n input_ids.extend(question_ids)\r\n input_ids.append(BERT_SEP)\r\n input_ids.extend(answer_ids)\r\n input_ids_truncated = input_ids[:BERT_INPUT_WORD_LEN]\r\n # print(input_ids_truncated)\r\n assert len(input_ids_truncated) <= BERT_INPUT_WORD_LEN, 'input_ids len can not exceed %d' % BERT_INPUT_WORD_LEN\r\n # print('input_ids_truncated_len ', len(input_ids_truncated))\r\n segment_ids = list()\r\n segment_question_ids = ['0'] * (len(question_ids) + 2)\r\n segment_answer_ids = ['1'] * (len(input_ids_truncated) - len(question_ids) - 2)\r\n segment_ids.extend(segment_question_ids)\r\n segment_ids.extend(segment_answer_ids)\r\n input_masks = ['1'] * len(input_ids_truncated)\r\n input_ids_parsed = RECORD_SPLIT_FLAG.join(input_ids_truncated)\r\n segment_ids_str = RECORD_SPLIT_FLAG.join(segment_ids)\r\n input_masks_str = RECORD_SPLIT_FLAG.join(input_masks)\r\n # print('segmend_ids ', segment_ids_str)\r\n # print('input_masks ', input_masks_str)\r\n return input_ids_parsed, segment_ids_str, input_masks_str", "def establish_players(n_players):\n usernames_out = [input('Please input a username for player ' +str(i)) for i in range(n_players)]\n return {'username':usernames_out}", "def format_raw_input(user_input):\n # Replace silly “ or ” characters with \"\n # TODO: Swap out with regex\n raw_input = user_input.strip().replace(\n '“', '\"').replace(\"”\", '\"').replace(\",\", \"\").replace(\"\\n\", \" \")\n # Break apart the string into each coordinate\n raw_inputs = [r.replace('\"', '') for r in raw_input.split('\" \"')]\n # Return coordinates as lists of ints.\n return [[int(i) for i in r.split(\" \")] for r in raw_inputs]", "def ask_user():\r\n while True:\r\n if bj.player1.double_down is True and bj.player1.split is True and bj.player1.went_split is False:\r\n p_choice = input(\"Hit, Stand, Double Down or Split?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\" and p_choice != \"dd\" and p_choice != \"double\" and p_choice != \"double down\" and p_choice != \"split\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice\r\n elif bj.player1.split is True and bj.player1.went_split is False: # various input prompts depending on available player choices\r\n p_choice = input(\"Hit, Stand or Split?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\" and p_choice != \"split\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice\r\n elif bj.player1.double_down is True:\r\n p_choice = input(\"Hit, Stand or Double Down?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\" and p_choice != \"dd\" and p_choice != \"double\" and p_choice != \"double down\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice\r\n else:\r\n p_choice = input(\"Hit or 
Stand?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice", "def user_guess():\n return list(input(\"What is your guess?\"))", "async def parse_input_args_filters(ctx, commands, args) -> (discord.Member, bool, str, list, list, list):\n user = None\n has_all = False\n group_by_key = 'set_code'\n affiliation_names = []\n rarity_codes = []\n card_codes = []\n\n # Parse all the arguments\n for arg in args:\n # Check if the argument is a user\n try:\n converter = commands.MemberConverter()\n user = await converter.convert(ctx=ctx, argument=arg)\n # Check if the argument is an affiliation\n except commands.errors.MemberNotFound:\n argLowerCase = arg.lower()\n if argLowerCase == 'all':\n has_all = True\n elif argLowerCase in ['a', 'affiliation', 'affiliations']:\n group_by_key = 'affiliation_name'\n elif argLowerCase in ['f', 'faction', 'factions']:\n group_by_key = 'faction_name'\n elif argLowerCase in ['rar', 'rarity']:\n group_by_key = 'rarity_code'\n elif argLowerCase in ['nogroup', 'nogroups']:\n group_by_key = ''\n elif argLowerCase in ['v', 'villain', 'villains']:\n affiliation_names.append('Villain')\n elif argLowerCase in ['h', 'hero', 'heroes']:\n affiliation_names.append('Hero')\n elif argLowerCase in ['n', 'neutral', 'neutrals']:\n affiliation_names.append('Neutral')\n elif argLowerCase in ['s', 'starter', 'starters']:\n rarity_codes.append('S')\n elif argLowerCase in ['c', 'common']:\n rarity_codes.append('C')\n elif argLowerCase in ['u', 'uncommon']:\n rarity_codes.append('U')\n elif argLowerCase in ['r', 'rare']:\n rarity_codes.append('R')\n elif argLowerCase in ['l', 'legendary']:\n rarity_codes.append('L')\n elif is_valid_card_number_format(arg):\n card_codes.append(arg)\n else:\n raise ValueError('Invalid argument: {}'.format(arg))\n\n if card_codes and (has_all or affiliation_names or rarity_codes):\n raise ValueError('Invalid arguments. You can\\'t mix card numbers and batch.')\n elif has_all and (affiliation_names or rarity_codes):\n raise ValueError('Invalid arguments. Use either \\\"all\\\" or affiliation/rarity name but not both.')\n\n return user, has_all, group_by_key, affiliation_names, rarity_codes, card_codes", "def determine_marks():\n # ask player 1 if X or O\n valid_answer = False\n while not valid_answer:\n p1_string = input(\"Player 1: Would you like X or O? \")\n p1_mark = p1_string[0].capitalize()\n valid_answer = check_inputs([\"X\", \"O\"], p1_mark)\n if p1_mark == 'X':\n return {\"Player 1\": \"X\", \"Player 2\": \"O\"}\n else:\n return {\"Player 1\": \"O\", \"Player 2\": \"X\"}", "def create_players_id_dict(self) -> list:\n players_id = []\n self.show_players()\n print(\"\\n\" + \"Enter id of wanted players : \")\n while len(players_id) < 8:\n while True:\n id_choice = check.request_id(PLAYERS)\n if check.check_not_same_value(players_id, id_choice) is True:\n players_id.append(id_choice)\n break\n return players_id", "def accept_inputs(fields):\n user_is_not_happy = True\n while user_is_not_happy:\n # store the response provisionally until we know the user wants to keep it\n provisional_response_dict = {}\n for field in fields:\n provisional_response_dict[field] = str(raw_input(\"%s: \" % field))\n response = str(raw_input(\"Happy with this? 
y/n: \"))\n if response == \"y\":\n user_is_not_happy = False\n else:\n # if this is the case, we go around again\n user_is_not_happy = True\n\n # return the provisional_response_dict\n return provisional_response_dict", "def build_player_data():\n names = [\"Gunther O'Brian\",\n 'Workman Gloom',\n 'Esme Ramsey',\n 'Cornelius Games',\n 'Kline Greenlemon',\n 'Hotbox Sato',\n 'Famous Owens',\n 'Jenkins Good']\n nums = [77, 31, 37, 6, 14, 53, 7, 64]\n avgs = [0.40666, 0.118451, 0.400093, 0.335117,\n 0.425694, 0.353378, 0.179842, 0.246856]\n\n return names, nums, avgs", "def ask_info_player(self) -> str:\n\n print(\"Enter first name : \")\n while True:\n first_name = input()\n if check.check_input_string_special(first_name) is True:\n if check.check_input_string_len(first_name) is True:\n if check.check_input_string_integer(first_name) is True:\n break\n\n print(\"Enter last name : \")\n while True:\n last_name = input()\n if check.check_input_string_special(last_name) is True:\n if check.check_input_string_len(last_name) is True:\n if check.check_input_string_integer(last_name) is True:\n break\n\n print(\"Enter date of birth with this format YEAR-MONTH-DAY : \")\n birthday = check.check_date_input()\n\n print(\n \"Enter a number for choose the gender : \\n\"\n \"1 - Man \\n\"\n \"2 - Women\"\n )\n genre = check.request_selection_with_number(\"Man\", \"Women\", \"none\")\n\n print(\"\\n The player {} {}, {}, birth on {} has been added to the database !\".format(\n first_name,\n last_name,\n genre,\n birthday))\n\n return first_name, last_name, birthday, genre", "def main():\n counter = 0\n inputs = ['','','']\n score_one = 0\n score_two = 0\n\n for line in sys.stdin:\n # Line 1 is the number of cards to expect for each player (1 <= N <= 1000)\n if counter == 0:\n inputs[counter] = line.strip()\n else:\n inputs[counter] = list(line.strip().replace(' ', ''))\n for each in range(int(inputs[0])):\n inputs[counter][each] = inputs[counter][each].replace('A', '13')\n inputs[counter][each] = inputs[counter][each].replace('K', '12')\n inputs[counter][each] = inputs[counter][each].replace('Q', '11')\n inputs[counter][each] = inputs[counter][each].replace('J', '10')\n\n counter += 1\n\n for card in range(int(inputs[0])):\n # if they're the same, do nothing\n if int(inputs[1][card]) == int(inputs[2][card]):\n continue\n\n # if A is greater, plus one:\n if int(inputs[1][card]) > int(inputs[2][card]):\n score_one += 1\n else:\n score_two += 1\n\n if score_one > score_two:\n print \"PLAYER 1 WINS\"\n elif score_two > score_one:\n print \"PLAYER 2 WINS\"\n else:\n print \"TIE\"", "def read_input(self) -> None:\n raw_input = sys.stdin.read()\n\n self.input = raw_input.split('\\n')\n self.input = self.input[0:-1]\n\n self.packets = []\n for item in self.input:\n if item:\n self.packets.append(literal_eval(item))", "def get_inputs(title, list_labels):\n print(f\"{title}\")\n # list which holds the input answers from user\n user_inputs = []\n for item in list_labels:\n user_inputs.append(input(f\"{item}: \"))\n return user_inputs", "def test_get_player_names(self):\n INPUT.side_effect = ['A', 'M', 'Z', '']\n names = game.pig.get_player_names()\n self.assertEqual(names, ['A', 'M', 'Z'])", "def initialize_players():\n while True:\n nb_of_players = input(\"\\nEntrez le nombre de joueurs : \")\n if not nb_of_players.isdigit():\n print(\"You have to enter a number!\")\n else:\n nb_of_players = int(nb_of_players)\n if nb_of_players < 2:\n print(\"You have to enter at least two!\")\n else:\n break\n nb_of_players = 
int(nb_of_players)\n list_of_players = [] #This list is going to be returned\n names_secure = [] #stores player's names in lower mode for security\n for index in range(1, nb_of_players+1):\n while True:\n player_name = input(\"Entrer le nom du joueur {} \".format(index))\n if (player_name.lower() == 'end' or player_name.lower() in names_secure):\n print(\"Incorrect Name\")\n else:\n names_secure.append(player_name.lower())\n new_player = Player(player_name)\n list_of_players.append(new_player)\n break\n return list_of_players", "def user_input():\n guess_num = int(input(\"please type four-digit: \"))\n guess_list = list(str(guess_num))\n return guess_list", "def get_player_name():\n\n player = list(input(\"\\nEnter the name of the Football player: \").split(\" \"))\n player_name = \" \".join([i.capitalize() for i in player])\n return player_name", "def create_input_list(prompt):\n list_countries = input(prompt).split(\", \")\n list_countries = [x.lower() for x in list_countries] \n return list_countries", "def get_input() -> List[str]:\n with open('aoc_cookie.json') as c:\n data = load(c)\n\n day = argv[0].split('/')[-1].split('.')[0].split('AOC')[-1]\n # Load the cookie from .json\n headers = {'cookie': data['cookie']}\n # GET to the challenge\n r = get(f'https://adventofcode.com/2020/day/{day}/input', headers=headers)\n return r.text.split('\\n')[:-1]", "def play():\n display_starting_message()\n print(\"\")\n print(\"*\"*10)\n for question_number, question in enumerate(list_of_questions):\n print(question)\n print(\"\")\n for responses in list_of_questions[question]:\n print(responses)\n pick_one = input(\"pick one: \")\n check_murder_sauce(question, pick_one)\n\n murder_sauce_result(murder_sauce)" ]
[ "0.7785427", "0.5724351", "0.5629555", "0.5615475", "0.5556688", "0.54903173", "0.53061044", "0.5302518", "0.5204586", "0.51927483", "0.5189104", "0.515343", "0.51155186", "0.5102323", "0.50311154", "0.50235236", "0.5017103", "0.49988022", "0.49984848", "0.4991658", "0.49902153", "0.4981326", "0.49717328", "0.49711126", "0.49603295", "0.49573192", "0.49542975", "0.49509275", "0.49415714", "0.49263304" ]
0.8002285
0
This function collects a list of user inputs for players and decodes them.
def collect_players_list():
    players_list = []
    while (players_input := input("Enter player: ")) != '#':
        i = players_input.upper()
        if not is_valid_player(i):
            print("Please enter a valid Suspect.")
            continue
        if i not in players_list:
            players_list.append(i)
    players_decoded = [Board.identify(player) for player in players_list]
    suspects_decoded = [Board.translate(player) for player in players_list]
    return players_decoded
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collect_players_and_suspects_list():\n \n players_list = []\n while (players_input := input(\"Enter player: \")) != '#':\n i = players_input.upper()\n if not is_valid_player(i):\n print(\"Please enter a valid Suspect.\")\n continue\n if i not in players_list:\n players_list.append(i)\n players_decoded = [Board.identify(player) for player in players_list]\n suspects_decoded = [Board.translate(player) for player in players_list]\n return players_decoded, suspects_decoded", "def getPlayerListFromGUI(self):\n playerlist = []\n # Tried to be cheeky and only have this called on initialization, but this made adding / removing to player list in real time impossible\n # Get input list of target players\n src = \"./data/player_list.txt\"\n txt = open(src, \"r\", encoding=\"utf8\")\n\n for aline in txt:\n values = aline.strip(\"\\n\").split(\",\")\n playerlist.append(values)\n txt.close()\n\n return playerlist", "def initialize_players():\n while True:\n nb_of_players = input(\"\\nEntrez le nombre de joueurs : \")\n if not nb_of_players.isdigit():\n print(\"You have to enter a number!\")\n else:\n nb_of_players = int(nb_of_players)\n if nb_of_players < 2:\n print(\"You have to enter at least two!\")\n else:\n break\n nb_of_players = int(nb_of_players)\n list_of_players = [] #This list is going to be returned\n names_secure = [] #stores player's names in lower mode for security\n for index in range(1, nb_of_players+1):\n while True:\n player_name = input(\"Entrer le nom du joueur {} \".format(index))\n if (player_name.lower() == 'end' or player_name.lower() in names_secure):\n print(\"Incorrect Name\")\n else:\n names_secure.append(player_name.lower())\n new_player = Player(player_name)\n list_of_players.append(new_player)\n break\n return list_of_players", "def establish_players(n_players):\n usernames_out = [input('Please input a username for player ' +str(i)) for i in range(n_players)]\n return {'username':usernames_out}", "def get_user_list(question):\n return [int(s) for s in input(question).split()]", "def collect_cards():\n \n cards_list = []\n while (cards_input := input(\"Enter card: \")) != '#':\n i = cards_input.upper()\n if not is_valid(i):\n print(f\"Please enter a valid card.\")\n continue\n cards_list.append(i)\n cards_decoded = [Board.translate(card) for card in cards_list]\n return cards_decoded", "def create_players_id_dict(self) -> list:\n players_id = []\n self.show_players()\n print(\"\\n\" + \"Enter id of wanted players : \")\n while len(players_id) < 8:\n while True:\n id_choice = check.request_id(PLAYERS)\n if check.check_not_same_value(players_id, id_choice) is True:\n players_id.append(id_choice)\n break\n return players_id", "def create_input_list(prompt):\n list_countries = input(prompt).split(\", \")\n list_countries = [x.lower() for x in list_countries] \n return list_countries", "def get_data_from_user(questions, answers_types, id_storage, id_, is_alpha):\n user_data = []\n\n for i in range(len(questions)):\n user_input = None\n\n while type(user_input) != answers_types[i]:\n user_input = ui.get_inputs([questions[i]], '')[0]\n user_input = get_correct_data_types(user_input, answers_types[i], is_alpha[i])\n\n # Other differences while asking for data here\n\n user_data.append(user_input)\n\n user_data = [str(record) for record in user_data]\n\n return user_data", "def create_players(self):\n for i in range(self.number_of_players):\n self.players_names.append(pyip.inputStr(\n prompt=f'\\nEnter name of player {i + 1}:\\n'))", "def make_player_list(player_arg):\n\n 
players = []\n\n names_pieces = player_arg.split(',')\n for name_piece in names_pieces:\n player_name, piece_name = name_piece.split(':')\n piece = PIECE_MAP[piece_name]\n players.append(player.Player(player_name, piece))\n\n return players", "def get_player_name():\n\n player = list(input(\"\\nEnter the name of the Football player: \").split(\" \"))\n player_name = \" \".join([i.capitalize() for i in player])\n return player_name", "def load_inputs(self, player, mixer, inputs):\r\n raise NotImplementedError", "def load_inputs(self, player, mixer, inputs):\r\n raise NotImplementedError", "def vsepr_parse_user_answer(user_input):\r\n return json.loads(user_input)", "def process_input(data):\n if data == \"NAME\":\n return get_name()\n \n elif data == \"SHIP PLACEMENT\":\n return get_ship_placements()\n \n elif data == \"SHOT LOCATION\":\n return get_shot_location()\n \n elif data == \"MISS\":\n MY_SHOTS[LAST_SHOT[0]][LAST_SHOT[1]] = data\n \n elif data == \"HIT\":\n MY_SHOTS[LAST_SHOT[0]][LAST_SHOT[1]] = data\n \n elif data[:4] == \"SUNK\":\n MY_SHOTS[LAST_SHOT[0]][LAST_SHOT[1]] = data\n \n elif data[:13] == \"OPPONENT SHOT\":\n tokens = data[14:].split(\",\")\n OPPONENT_SHOTS.append((int(tokens[0]), int(tokens[1]), tokens[2]))\n \n elif data == \"WIN\":\n return False\n \n elif data == \"LOSE\":\n return False\n \n elif data == \"ERROR\":\n return False\n \n return None", "def test_get_player_names(self):\n INPUT.side_effect = ['A', 'M', 'Z', '']\n names = game.pig.get_player_names()\n self.assertEqual(names, ['A', 'M', 'Z'])", "def show_players(self) -> None:\n players_list = []\n for player in PLAYERS:\n data_player = ((\n str(player.get(\"first_name\")) + \" \" +\n str(player.get(\"last_name\")) + \" | \" +\n str(player.get(\"birthday\")) + \" | \" +\n str(player.get(\"genre\")) + \" | \" +\n str(player.get(\"ranking\"))\n ))\n players_list.append(data_player)\n utils.clear_terminal()\n print(\n \"Do you want the list of players by alphabetical order or by ranking ? 
\\n\"\n \"1 - Ranking players list \\n\"\n \"2 - Alphabetical players list\"\n )\n choice = check.request_selection_with_number(\"ranking\", \"alphabetical\", \"None\")\n if choice == \"ranking\":\n player_id = 0\n players_list = sorted(players_list, key=lambda player: players_list[4])\n utils.clear_terminal()\n print(\"==========================================\")\n print(\"List of all Players in ranking order : \")\n print(\"==========================================\")\n for player in players_list:\n player_id += 1\n print(str(player_id) + \" : \" + player)\n elif choice == \"alphabetical\":\n player_id = 0\n players_list.sort()\n utils.clear_terminal()\n print(\"============================================\")\n print(\"List of all Players in alphabetical order : \")\n print(\"============================================\")\n for player in players_list:\n player_id += 1\n print(str(player_id) + \" : \" + player)", "def build_player_data():\n names = [\"Gunther O'Brian\",\n 'Workman Gloom',\n 'Esme Ramsey',\n 'Cornelius Games',\n 'Kline Greenlemon',\n 'Hotbox Sato',\n 'Famous Owens',\n 'Jenkins Good']\n nums = [77, 31, 37, 6, 14, 53, 7, 64]\n avgs = [0.40666, 0.118451, 0.400093, 0.335117,\n 0.425694, 0.353378, 0.179842, 0.246856]\n\n return names, nums, avgs", "def sanitize_user_input(self, user_input: Sequence[str]) -> Set[str]:\n sanitized_names = set()\n for name in user_input:\n providers_for_name = self.provided_by(name)\n if not providers_for_name:\n aurman_error(\"No providers for {} found.\".format(Colors.BOLD(Colors.LIGHT_MAGENTA(name))))\n raise InvalidInput(\"No providers for {} found.\".format(Colors.BOLD(Colors.LIGHT_MAGENTA(name))))\n elif len(providers_for_name) == 1:\n sanitized_names.add(providers_for_name[0].name)\n # more than one provider\n else:\n dep_providers_names = [package.name for package in providers_for_name]\n dep_name = strip_versioning_from_name(name)\n\n # name matches one of the providers names\n if dep_name in dep_providers_names:\n sanitized_names.add(dep_name)\n else:\n aurman_note(\n \"We found multiple providers for {}\\nChoose one by entering the corresponding number.\".format(\n Colors.BOLD(Colors.LIGHT_MAGENTA(name))\n )\n )\n\n while True:\n for i in range(0, len(providers_for_name)):\n print(\n \"Number {}: {}\".format(i + 1, self.repo_of_package(providers_for_name[i].name))\n )\n\n try:\n user_input = int(input(aurman_question(\"Enter the number: \", False, False)))\n if 1 <= user_input <= len(providers_for_name):\n sanitized_names.add(providers_for_name[user_input - 1].name)\n break\n except ValueError:\n print(aurman_error(\"That was not a valid choice!\", False, False))\n else:\n print(aurman_error(\"That was not a valid choice!\", False, False))\n\n return sanitized_names", "def playerinput(kleuren):\r\n vierhidden = []\r\n i = 0\r\n try:\r\n while i < 4:\r\n kleur1, kleur2, kleur3, kleur4 = input('Geef jouw combinatie: ').split()\r\n kleurcombinatie = kleur1, kleur2, kleur3, kleur4\r\n for kleur in kleurcombinatie:\r\n if kleur not in kleuren:\r\n print('Kies een van de kleuren uit de lijst.')\r\n else:\r\n vierhidden.append(kleur)\r\n i += 1\r\n except:\r\n print('Geef 4 kleuren uit de lijst in 1 regel, met spatie en kleine letters')\r\n return playerinput(kleurenlijst)\r\n return vierhidden", "def ask_user():\r\n while True:\r\n if bj.player1.double_down is True and bj.player1.split is True and bj.player1.went_split is False:\r\n p_choice = input(\"Hit, Stand, Double Down or Split?\\n\")\r\n if p_choice != \"hit\" and p_choice != 
\"stand\" and p_choice != \"dd\" and p_choice != \"double\" and p_choice != \"double down\" and p_choice != \"split\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice\r\n elif bj.player1.split is True and bj.player1.went_split is False: # various input prompts depending on available player choices\r\n p_choice = input(\"Hit, Stand or Split?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\" and p_choice != \"split\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice\r\n elif bj.player1.double_down is True:\r\n p_choice = input(\"Hit, Stand or Double Down?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\" and p_choice != \"dd\" and p_choice != \"double\" and p_choice != \"double down\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice\r\n else:\r\n p_choice = input(\"Hit or Stand?\\n\")\r\n if p_choice != \"hit\" and p_choice != \"stand\":\r\n print(\"Wrong input.\\n\")\r\n continue\r\n else:\r\n return p_choice", "def get_inputs(title, list_labels):\n print(f\"{title}\")\n # list which holds the input answers from user\n user_inputs = []\n for item in list_labels:\n user_inputs.append(input(f\"{item}: \"))\n return user_inputs", "def get_user_inputs():\n print('Enter the path to the loan applications file, path to the output file, N (the starting capital), K (the max number of concurrently active loans)')\n print('For example: applications.json approved.txt 50000 1000')\n user_input = raw_input()\n return user_input.split()", "def set_name(self):\n player1 = input('Enter a name for player 1: ')\n self._players.append(player1)\n player2 = input('Enter a name for player 2: ')\n self._players.append(player2)\n print()\n return self._players", "def user_input():\n guess_num = int(input(\"please type four-digit: \"))\n guess_list = list(str(guess_num))\n return guess_list", "def read_input(self) -> None:\n raw_input = sys.stdin.read()\n\n self.input = raw_input.split('\\n')\n self.input = self.input[0:-1]\n\n self.packets = []\n for item in self.input:\n if item:\n self.packets.append(literal_eval(item))", "def readInput(fileName):\n\n playerOne = []\n playerTwo = []\n\n\n with open(fileName, 'r') as file:\n fileList = file.read()\n\n fileList = fileList.split(\"\\n\\nPlayer 2:\\n\")\n\n playerOne = fileList[0].split(\"\\n\")\n playerOne = list(map(int, playerOne[1:]))\n\n playerTwo = fileList[1].split(\"\\n\")\n playerTwo = list(map(int, playerTwo))\n\n return playerOne, playerTwo", "def parsePlayerData():\n\ttry:\n\t\trawPlayerData = str(re.findall(bracketRegex, urllib.urlopen(mapURL).read())[0])\n\texcept:\n\t\tprint \"exception!\"\n\t\trawPlayerData = None\n\tif rawPlayerData is not None:\n\t\tfixedPlayerData = re.sub(\"'(\\d+)'\", '\\g<1>', rawPlayerData).replace(\"\\\\'\", \"\").replace(\"'\", '\"')\n\t\treturn json.loads(fixedPlayerData, 'latin1')", "def get_inputs(list_labels, title):\n inputs = []\n\n print(f\"\\t{title}\")\n for label in list_labels:\n user_input = input(f\"\\t{label}\").strip()\n user_input = user_input.replace(\";\",\"\")\n inputs.append(user_input)\n return inputs" ]
[ "0.7349576", "0.5860871", "0.5805779", "0.57845634", "0.5769686", "0.57296795", "0.56243366", "0.5612682", "0.5509435", "0.5483779", "0.54824257", "0.54214525", "0.54100615", "0.54100615", "0.5393822", "0.5390574", "0.53871775", "0.5355974", "0.5330425", "0.531194", "0.53072214", "0.52784926", "0.527523", "0.5243253", "0.52256423", "0.52111256", "0.5198752", "0.51771474", "0.51646", "0.51198447" ]
0.7726118
0
This function collects a list of user inputs for cards and decodes them.
def collect_cards(): cards_list = [] while (cards_input := input("Enter card: ")) != '#': i = cards_input.upper() if not is_valid(i): print(f"Please enter a valid card.") continue cards_list.append(i) cards_decoded = [Board.translate(card) for card in cards_list] return cards_decoded
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collect_players_list():\n \n players_list = []\n while (players_input := input(\"Enter player: \")) != '#':\n i = players_input.upper()\n if not is_valid_player(i):\n print(\"Please enter a valid Suspect.\")\n continue\n if i not in players_list:\n players_list.append(i)\n players_decoded = [Board.identify(player) for player in players_list]\n suspects_decoded = [Board.translate(player) for player in players_list]\n return players_decoded", "def collect_players_and_suspects_list():\n \n players_list = []\n while (players_input := input(\"Enter player: \")) != '#':\n i = players_input.upper()\n if not is_valid_player(i):\n print(\"Please enter a valid Suspect.\")\n continue\n if i not in players_list:\n players_list.append(i)\n players_decoded = [Board.identify(player) for player in players_list]\n suspects_decoded = [Board.translate(player) for player in players_list]\n return players_decoded, suspects_decoded", "def read_card():\n suit_is_valid = False\n while not suit_is_valid:\n suit_input = input('Suit: ').upper()\n for suit in Suit:\n if suit_input == suit.name:\n card_suit = suit\n suit_is_valid = True\n\n rank_is_valid = False\n while not rank_is_valid:\n rank_input = input('Rank: ').upper()\n for rank in Rank:\n if rank_input == rank.name:\n card_rank = rank\n rank_is_valid = True\n return Card(card_suit, card_rank)", "def make_card_list_from_string(string, ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2):\n deck_list = []\n while len(string) >= 14:\n x = 'card_' + string[7:12]\n card = eval (x)\n deck_list.append(card)\n\n string = string[14:]\n\n\n return deck_list", "def new_card():\n print('x'*50)\n name_str=input('name plz:')\n phone_str=input('phone num plz:')\n qq_str=input('qq num plz:')\n email_str=input('mail plz:')\n card_dict={'name':name_str,'phone':phone_str,'qq':qq_str,'email':email_str}\n card_list.append(card_dict)\n print('the card of %s has been created.'%name_str)\n print(card_dict)\n #1.need info from user\n #2.use the input info to create a dictionary\n #3.add the dic to card_list\n #4.tell the user the function has done.", "def main(stdin):\n ret = []\n nlines = 0\n incard = False\n for line in stdin:\n nlines = nlines + 1\n # Will raise a Unicode... 
exception\n line = line.encode('ascii').strip()\n if incard:\n ret[-1].append(line)\n if line == 'END:VCARD':\n incard = False\n else: # This line must match.\n if line != 'BEGIN:VCARD':\n raise Exception(\n 'Not start of VCARD (line ' + str(nlines) + '): ' + line)\n ret.append([line, ])\n incard = True\n # Done.\n return ret", "def make_deck_from_string(string, ai_settings, screen, buttons,screen_status, button_status, card_database_filter, user, player2):\n deck_list = []\n while len(string) >= 14:\n x = 'card_' + string[7:9] + '_' + string[10:12]\n card = eval (x)\n if card.card_type == 'monster':\n deck_list.append(Monster(name = card.name, set_number= card.set_number,card_number= card.card_number,card_type= card.card_type,job= card.job,level= card.level,\n attack= card.attack, health= card.health,lv_type= card.lv_type,lv_active_level= card.lv_active_level, special_effect= card.special_effect))\n elif card.card_type == 'tactic':\n deck_list.append(Tactic(name = card.name, set_number= card.set_number,card_number= card.card_number,card_type= card.card_type,job= card.job,level= card.level,\n lv_type= card.lv_type,lv_active_level= card.lv_active_level, special_effect= card.special_effect))\n elif card.card_type == 'item':\n deck_list.append(Item(name = card.name, set_number= card.set_number,card_number= card.card_number,card_type= card.card_type,job= card.job,level= card.level,\n lv_type= card.lv_type,lv_active_level= card.lv_active_level, special_effect= card.special_effect))\n elif card.card_type == 'character':\n deck_list.append(Character(name = card.name,set_number= card.set_number,card_number= card.card_number,card_type= card.card_type,job= card.job,level= card.level,\n health= card.health,skill_1_lv = card.skill_1_lv, skill_1_type = card.skill_1_type,skill_2_lv = card.skill_2_lv, skill_2_type = card.skill_2_type,skill_3_lv = card.skill_3_lv, skill_3_type = card.skill_3_type))\n\n string = string[14:]\n\n\n return deck_list", "def evalcards(cardA, cardB, cardC, cardD):\n array = []\n ranks = []\n spadessort = []\n cardsinsuit = 1\n # BASESUIT definitions\n if cardA[-3:] == \"SPA\":\n basesuit = suitspades\n if cardA[-3:] == \"HEA\":\n basesuit = suithearts\n if cardA[-3:] == \"DIA\":\n basesuit = suitdiamonds\n if cardA[-3:] == \"CLB\":\n basesuit = suitclubs\n if cardB in basesuit:\n cardsinsuit += 1\n if cardC in basesuit:\n cardsinsuit += 1\n if cardD in basesuit:\n cardsinsuit += 1\n #BEGIN SORTING CARDS\n cardBBB = cardB\n cardCCC = cardC\n cardDDD = cardD\n if cardB not in basesuit:\n cardBBB = basesuit[12]\n if cardC not in basesuit:\n cardCCC = basesuit[12]\n if cardD not in basesuit:\n cardDDD = basesuit[12]\n array += [str(basesuit.index(cardA))]\n if len(str(basesuit.index(cardA))) == 1:\n del array[0]\n array += [\"0\"+str(basesuit.index(cardA))]\n array += [str(basesuit.index(cardBBB))]\n if len(str(basesuit.index(cardBBB))) == 1:\n del array[1]\n array += [\"0\"+str(basesuit.index(cardBBB))]\n array += [str(basesuit.index(cardCCC))]\n if len(str(basesuit.index(cardCCC))) == 1:\n del array[2]\n array += [\"0\"+str(basesuit.index(cardCCC))]\n array += [str(basesuit.index(cardDDD))]\n if len(str(basesuit.index(cardDDD))) == 1:\n del array[3]\n array += [\"0\"+str(basesuit.index(cardDDD))]\n array.sort()\n for x in range(0,cardsinsuit):\n ranks += [basesuit[int(array[x])]]\n #CHECKING FOR NOT IN SUIT AND FOR SPADES\n if cardB not in basesuit:\n if cardB in spades:\n spadessort += [cardB]\n else:\n ranks += [cardB]\n if cardC not in basesuit:\n if cardC in spades:\n if 
(cardB in spades) and (spades.index(cardC) < spades.index(cardB)):\n spadessort = listinsert(spadessort, 0, cardC)\n elif (cardB in spades) and (spades.index(cardC) > spades.index(cardB)):\n spadessort += [cardC]\n else:\n spadessort += [cardC]\n else:\n ranks += [cardC]\n if cardD not in basesuit:\n if cardD in spades:\n if (cardB in spades) and (cardC in spades):\n if (spades.index(cardD) < spades.index(cardC)) and (spades.index(cardD) < spades.index(cardB)):\n spadessort = listinsert(spadessort, 0, cardD)\n elif ((spades.index(cardD) < spades.index(cardC)) and (spades.index(cardD) > spades.index(cardB))) or ((spades.index(cardD) > spades.index(cardC)) and (spades.index(cardD) < spades.index(cardB))):\n spadessort = listinsert(spadessort, 1, cardD)\n elif (spades.index(cardD) > spades.index(cardC)) and (spades.index(cardD) > spades.index(cardB)):\n spadessort += [cardD]\n elif (cardB in spades) and (cardC not in spades):\n if spades.index(cardD) < spades.index(cardB):\n spadessort = listinsert(spadessort, 0, cardD)\n if spades.index(cardD) > spades.index(cardB):\n spadessort += [cardD]\n elif (cardB not in spades) and (cardC in spades):\n if spades.index(cardD) < spades.index(cardC):\n spadessort = listinsert(spadessort, 0, cardD)\n if spades.index(cardD) > spades.index(cardC):\n spadessort += [cardD]\n else:\n spadessort += [cardD]\n else:\n ranks += [cardD]\n ranks = spadessort + ranks\n return(ranks)", "def get_user_list(question):\n return [int(s) for s in input(question).split()]", "def create_input_list(prompt):\n list_countries = input(prompt).split(\", \")\n list_countries = [x.lower() for x in list_countries] \n return list_countries", "def parseCards(self, raw_cards: dict, cardsList: list):\r\n cards = {}\r\n toParse = list(cardsList)\r\n for catagory in raw_cards:\r\n for (name, subcat) in {name:catagory[name] for name in catagory if name != \"name\"}.items():\r\n for card in subcat:\r\n if(card[\"name\"]) in cardsList:\r\n toParse.remove(card[\"name\"])\r\n cards[card[\"name\"]] = card\r\n if(len(toParse) > 0):\r\n raise Exception(f\"Could not find cards {toParse}. List was {cardsList}\")\r\n return cards", "def is_valid(user_input, card_type=None, skip=False):\n \n i = user_input.upper()\n if i == 'Q':\n exit(\"\\nExiting program. Thanks for using Clue Detective!\\n\")\n if skip:\n if i == 'X':\n return True\n if card_type:\n key_list = [key for key in Board.input_decoder \n if Board.input_decoder[key].type == card_type]\n if i in key_list:\n return True\n elif not card_type:\n if i in Board.input_decoder:\n return True \n else:\n return False", "def __init__ ( self ):\n \n self.__deck = []\n \n for i in range(0,7):\n self.__deck.append('1')\n \n for i in range(0,10):\n self.__deck.append('2')\n \n for i in range(0,3):\n self.__deck.append('3')\n \n #appends the event cards using the first 3 letters of the card in all caps\n self.__deck.append('SEA')\n self.__deck.append('HER')\n self.__deck.append('VIC')\n self.__deck.append('PIL')\n self.__deck.append('TRU')", "def cards(\n self,\n cards: Union[List[Tuple[int, str, str]], List[Any]]\n ) -> None:\n self._cards: List[List[Tuple[int, str, str]]] = [cards]", "def followUpAttack(self, validCards):\n print(\"Select card from... 
\")\n cardManager.printHand(validCards)\n card = int(input(\"to your attack: \"))\n while card not in validCards: # error checking\n print(card)\n print(\"Please select a valid card from...\")\n cardManager.printHand(validCards)\n card = int(input(\"to your attack: \"))\n self.currentHand.remove(card)\n card = self.checkDoubles(card)\n return card", "async def parse_input_args_filters(ctx, commands, args) -> (discord.Member, bool, str, list, list, list):\n user = None\n has_all = False\n group_by_key = 'set_code'\n affiliation_names = []\n rarity_codes = []\n card_codes = []\n\n # Parse all the arguments\n for arg in args:\n # Check if the argument is a user\n try:\n converter = commands.MemberConverter()\n user = await converter.convert(ctx=ctx, argument=arg)\n # Check if the argument is an affiliation\n except commands.errors.MemberNotFound:\n argLowerCase = arg.lower()\n if argLowerCase == 'all':\n has_all = True\n elif argLowerCase in ['a', 'affiliation', 'affiliations']:\n group_by_key = 'affiliation_name'\n elif argLowerCase in ['f', 'faction', 'factions']:\n group_by_key = 'faction_name'\n elif argLowerCase in ['rar', 'rarity']:\n group_by_key = 'rarity_code'\n elif argLowerCase in ['nogroup', 'nogroups']:\n group_by_key = ''\n elif argLowerCase in ['v', 'villain', 'villains']:\n affiliation_names.append('Villain')\n elif argLowerCase in ['h', 'hero', 'heroes']:\n affiliation_names.append('Hero')\n elif argLowerCase in ['n', 'neutral', 'neutrals']:\n affiliation_names.append('Neutral')\n elif argLowerCase in ['s', 'starter', 'starters']:\n rarity_codes.append('S')\n elif argLowerCase in ['c', 'common']:\n rarity_codes.append('C')\n elif argLowerCase in ['u', 'uncommon']:\n rarity_codes.append('U')\n elif argLowerCase in ['r', 'rare']:\n rarity_codes.append('R')\n elif argLowerCase in ['l', 'legendary']:\n rarity_codes.append('L')\n elif is_valid_card_number_format(arg):\n card_codes.append(arg)\n else:\n raise ValueError('Invalid argument: {}'.format(arg))\n\n if card_codes and (has_all or affiliation_names or rarity_codes):\n raise ValueError('Invalid arguments. You can\\'t mix card numbers and batch.')\n elif has_all and (affiliation_names or rarity_codes):\n raise ValueError('Invalid arguments. 
Use either \\\"all\\\" or affiliation/rarity name but not both.')\n\n return user, has_all, group_by_key, affiliation_names, rarity_codes, card_codes", "def encode_action_discard(play_list):\n action_id_list = []\n for play in play_list:\n # encode the cards in plays into individual action id\n if len(play) == 0:\n action_id_list.append(0)\n continue\n\n cards_have_same_value = True\n for c in play:\n if c.value != play[0].value:\n cards_have_same_value = False\n action = 0\n if len(play) == 1:\n # single\n if play[0].suit != '':\n # remove the option of discardings the Joker\n suit_num = suits.index(play[0].suit)\n action = suit_num * 13 + play[0].value - 1\n action += 1\n elif len(play) == 2 and cards_have_same_value:\n # double\n if play[0].suit != '':\n # remove the option of discardings the Joker\n suits_temp = [play[0].suit, play[1].suit]\n suits_temp.sort()\n suit_num = double_combination.index(suits_temp)\n action = suit_num * 13 + play[0].value - 1\n action += 53\n elif len(play) == 3 and cards_have_same_value:\n # triple\n suits_temp = [play[0].suit, play[1].suit, play[2].suit]\n suits_temp.sort()\n suit_num = triple_combination.index(suits_temp)\n action = suit_num * 13 + play[0].value - 1\n action += 131\n elif len(play) == 4 and cards_have_same_value:\n # quadruple\n action = play[0].value - 1\n action += 183\n elif len(play) == 3:\n # straight of 3\n suit_num = suits.index(play[0].suit)\n action = suit_num * 11 + play[0].value - 1\n action += 196\n elif len(play) == 4:\n # straight of 4\n suit_num = suits.index(play[0].suit)\n action = suit_num * 10 + play[0].value - 1\n action += 240\n elif len(play) == 5:\n # straight of 5\n suit_num = suits.index(play[0].suit)\n action = suit_num * 9 + play[0].value - 1\n action += 280\n elif len(play) == 6:\n # straight of 6\n suit_num = suits.index(play[0].suit)\n action = suit_num * 8 + play[0].value - 1\n action += 316\n action_id_list.append(action)\n return action_id_list", "def process_card_fulfillment_list(card_fulfillment_list):\n processed_card_fulfillments = []\n\n for fulfillment in card_fulfillment_list:\n try:\n user_profile = fulfillment.profile.user.get_profile()\n except ObjectDoesNotExist:\n # Can do better when this fn is moved out of CC3 core, but for now:\n # get_profile() does work in User is inactive, so use the CC3Profile\n # and use dummy values where not available\n user_profile = fulfillment.profile\n user_profile.num_street = '-'\n user_profile.extra_address = '-'\n user_profile.is_stadlander_sso_user = None\n user_profile.individual_profile = None\n trans_dict = SortedDict()\n trans_dict['name'] = user_profile.name\n trans_dict['address'] = user_profile.address\n trans_dict['num_street'] = user_profile.num_street\n trans_dict['extra_address'] = user_profile.extra_address\n trans_dict['postal_code'] = user_profile.postal_code\n trans_dict['city'] = user_profile.city\n trans_dict['email'] = user_profile.user.email\n if user_profile.is_stadlander_sso_user is not None:\n trans_dict['is_stadlander_sso_user'] = user_profile.is_stadlander_sso_user()\n else:\n trans_dict['is_stadlander_sso_user'] = '-'\n if user_profile.individual_profile is not None:\n trans_dict['iban'] = user_profile.individual_profile.iban\n trans_dict['bic_code'] = user_profile.individual_profile.bic_code\n trans_dict['account_holder'] = user_profile.individual_profile.account_holder\n else:\n trans_dict['iban'] = '-'\n trans_dict['bic_code'] = '-'\n trans_dict['account_holder'] = '-'\n trans_dict['creation_date'] = u\"{0} {1}\".format(\n 
date_format(fulfillment.card_registration.creation_date, use_l10n=True),\n time_format(fulfillment.card_registration.creation_date, use_l10n=True))\n trans_dict['registration_choice'] = fulfillment.card_registration.registration_choice\n trans_dict['status'] = fulfillment.status\n\n processed_card_fulfillments.append(trans_dict)\n\n headings = [_('Name'), _('Address'), _('Num Street'), _('Extra Address'), _('Postal Code'), _('City'), _('Email'),\n _('Stadlander'), _('IBAN'), _('BIC Code'), _('Account Holder'), _('Creation Date'), _('Send / Old'),\n _('Status')]\n return processed_card_fulfillments, headings", "def change_cards(list_of_player):\t#list_of_player= list of cards, of all players\n\tlst=[]\n\tcard=[]\n\twhile len(my_deck)>=3:\t\t#to check if the remaining cards are more than 3 in deck \n\t\tfor i in range(4):\n\t\t\task=input(\"do you want to replace any card/s player %d [y/n]:\" %i)\n\t\t\tif ask=='y':\n\t\t\t\tprint(\"how many card/s you want to replace (valid upto 3) ?\")\n\t\t\t\tcards_to_replace=int(input())\n\t\t\t\tif cards_to_replace<=3:\t\t\t#if user enters upto 3 cards then proceed further\n\t\t\t\t\tprint(\"enter card/s you want to replace\")\n\t\t\t\t\tfor j in range(cards_to_replace):\n\t\t\t\t\t\tprint(\"please enter card number:\")\n\t\t\t\t\t\tnum=int(input())\n\t\t\t\t\t\tprint(\"please enter card type i.e 'Black','Spade' etc.:\")\n\t\t\t\t\t\tcard_type=input()\n\t\t\t\t\t\tcard.append((num,card_type))\n\t\t\t\t\tlist_of_player[i]=replace_card( list_of_player[i], card)\t#calling replace_card function\n\t\t\t\t\tprint(\"updated card list of player %d\" %i,list_of_player[i])\n\t\t\t\telse:\n\t\t\t\t\tprint(\"at a time, you can replace upto 3 cards only! \")\n\t\t\telse:\n\t\t\t\tlst.append('n')\n\t\tif len(lst)>=4:\n\t\t\tbreak\n\treturn print_cards(list_of_player)", "def read_input(self) -> None:\n raw_input = sys.stdin.read()\n\n self.input = raw_input.split('\\n')\n self.input = self.input[0:-1]\n\n self.packets = []\n for item in self.input:\n if item:\n self.packets.append(literal_eval(item))", "def __init__(self):\n self.cards = [Card(face=card[0], value=card[1], suit=suit)\n for card in CARD_VALUES().items() for suit in CARD_SUITS()]", "def decode_action_discard(action):\n discard = []\n # find the cards behind the action number\n # 52(single)+78(double)+52(triple)+13(quadruple)+44(staight3)+40(staight4)+36(staight5)+32(6)\n # card ranges from 1 to 13, suit ranges from CDHS\n if action <= 52:\n # single\n action -= 1\n rank = action % 13 + 1\n suit = suits[int(action/13)]\n discard = [Card(rank, suit)]\n elif action <= 130:\n # double\n action -= 53\n rank = action % 13 + 1\n suit1 = double_combination[int(action/13)][0]\n suit2 = double_combination[int(action/13)][1]\n discard = [Card(rank, suit1), Card(rank, suit2)]\n elif action <= 182:\n # triple\n action -= 131\n rank = action % 13 + 1\n suit1 = triple_combination[int(action/13)][0]\n suit2 = triple_combination[int(action/13)][1]\n suit3 = triple_combination[int(action/13)][2]\n discard = [Card(rank, suit1), Card(rank, suit2), Card(rank, suit3)]\n elif action <= 195:\n # quadruple\n action -= 183\n rank = action + 1\n discard = [Card(rank, \"Clubs\"), Card(rank, \"Diamonds\"), Card(rank, \"Hearts\"), Card(rank, \"Spades\")]\n elif action <= 239:\n # straight of 3\n action -= 196\n suit = suits[int(action/11)]\n rank = action % 11 + 1\n discard = [Card(rank, suit), Card(rank + 1, suit), Card(rank + 2, suit)]\n elif action <= 279:\n # straight of 4\n action -= 240\n suit = suits[int(action/10)]\n rank = 
action % 10 + 1\n discard = [Card(rank, suit), Card(rank + 1, suit), Card(rank + 2, suit), Card(rank + 3, suit)]\n elif action <= 315:\n # straight of 5\n action -= 280\n suit = suits[int(action/9)]\n rank = action % 9 + 1\n discard = [Card(rank, suit), Card(rank + 1, suit), Card(rank + 2, suit), Card(rank + 3, suit), Card(rank + 4, suit)]\n elif action <= 347:\n # straight of 6\n action -= 316\n suit = suits[int(action/8)]\n rank = action % 8 + 1\n discard = [Card(rank, suit), Card(rank + 1, suit), Card(rank + 2, suit), Card(rank + 3, suit), Card(rank + 4, suit), Card(rank + 5, suit)]\n return discard", "def ui_input() -> str:\n return input('Enter cards numbers(spaces needed): ')", "def process_messages(deck_of_cards, list_of_messages, method):\n output_message = []\n for string in list_of_messages: \n output_message.append(clean_message(string))\n\t\n if method == ENCRYPT:\n for i in range(len(output_message)):\n letter = \"\"\n for string in list_of_messages[i]:\n keystream_value = get_next_keystream_value(deck_of_cards)\n letter = letter + encrypt_letter(string, keystream_value)\n output_message[i] = letter\n\t \n elif method == DECRYPT:\n for i in range(len(output_message)):\n letter = \"\"\n for string in list_of_messages[i]:\n keystream_value = get_next_keystream_value(deck_of_cards)\n letter = letter + decrypt_letter(string, keystream_value)\n output_message[i] = letter\n return output_message\n # Two cases where if method is ENCRYPT or DECRYPT. if DECRYPT uses\n # decrypt_letter method. If ENCRYPT uses encrypt_letter method.", "def makeList(username, url, caseSensitive = False, wildCards = True):\n charList = []\n for ch in lower:\n # check for ch in \n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n for ch in numbers:\n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n for ch in special:\n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n for ch in other:\n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n if(caseSensitive):\n for ch in upper:\n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n if(wildCards):\n for ch in wildcards:\n if(checkPasswordCharacter(str(ch), username, url)):\n charList.append(str(ch))\n print(ch)\n return charList", "def deliver_card(data, access=None):\n\n schema = get_card_schema(data)\n if not schema:\n schema = card_schema\n data = deepcopy(data)\n\n if access is 'learn' and data['kind'] is 'choice':\n if data['order'] == 'random':\n shuffle(data['options'])\n\n if data['max_options_to_show']:\n data['options'] = data['options'][:data['max_options_to_show']]\n\n return deliver_fields(schema, data, access)", "def convert_str_encoded_cards_to_int_encoded(cards: List[str]) -> List[int]:\n return [card_ids[card] for card in cards]", "def decode_list(self, tokens: list) -> str:\r\n return NotImplementedError", "def get_data_from_user(questions, answers_types, id_storage, id_, is_alpha):\n user_data = []\n\n for i in range(len(questions)):\n user_input = None\n\n while type(user_input) != answers_types[i]:\n user_input = ui.get_inputs([questions[i]], '')[0]\n user_input = get_correct_data_types(user_input, answers_types[i], is_alpha[i])\n\n # Other differences while asking for data here\n\n user_data.append(user_input)\n\n user_data = [str(record) for record in user_data]\n\n return user_data", "def __init__(self):\r\n self.cards = []" ]
[ "0.6567946", "0.63332915", "0.5622721", "0.5523507", "0.5515353", "0.5509751", "0.54955375", "0.54447144", "0.53823245", "0.53788084", "0.5369966", "0.5357542", "0.5334989", "0.5311229", "0.5299947", "0.52394986", "0.52083004", "0.52079797", "0.5192435", "0.5192203", "0.51900476", "0.5180001", "0.51566446", "0.51346654", "0.5095058", "0.50946426", "0.5068487", "0.5064969", "0.50589585", "0.50569004" ]
0.7617722
0
Return tuple with name and symbol order of symbol table node gdbval
def get_symbol_name_order(gdbval): return (symtab_node_name (gdbval), int(gdbval["order"]))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def build_gdb_symbol_table():\n\n tab = Symtab()\n n = gdb.parse_and_eval (\"symtab->nodes\")\n while (long(n)):\n if symtab_node_is_function (n):\n current_symbol = GdbFunction(tab, n)\n tab.all_functions.append (current_symbol)\n elif symtab_node_is_variable (n):\n current_symbol = GdbVariable(tab, n)\n tab.all_variables.append (current_symbol)\n else:\n raise gdb.GdbError (\"Encountered an unknown symbol table node\");\n\n tab.order_to_sym[current_symbol.order] = current_symbol\n tab.all_symbols.append (current_symbol)\n\n n = n[\"next\"]\n pass\n\n tab.fixup()\n return tab", "def getSymbolTable(self) -> ghidra.app.util.bin.format.pe.debug.DebugCodeViewSymbolTable:\n ...", "def getSymbolValue(self) -> int:\n ...", "def parse_symbol_table(data, sections, elf_header):\n if is64bit(elf_header):\n symbol_entry_str = symbol_64_entry_str\n symbol_entry_spec = symbol_64_entry_spec\n else:\n symbol_entry_str = symbol_32_entry_str\n symbol_entry_spec = symbol_32_entry_spec\n entry_len = struct.calcsize(symbol_entry_str)\n \n st_offset = None\n if \".symtab\" in sections:\n section = \".symtab\"\n if \".strtab\" in sections:\n st_offset = sections[\".strtab\"][\"offset\"]\n else:\n st_offset = sections[section][\"offset\"]\n \n elif \".dynsym\" in sections:\n section = \".dynsym\"\n if \".dynstr\" in sections:\n st_offset = sections[\".dynstr\"][\"offset\"]\n else:\n st_offset = sections[section][\"offset\"]\n \n \n if section not in sections:\n return {}, {} \n \n symbols = {}\n imports = {}\n offset = sections[section][\"offset\"]\n size = sections[section][\"size\"]\n index = offset\n while index < offset + size:\n vals = {}\n if len(data) < index+entry_len: \n break\n \n val_data = struct.unpack(symbol_entry_str, data[index:index+entry_len])\n for i, elem in enumerate(symbol_entry_spec):\n vals[elem[0]] = val_data[i]\n \n if st_offset is None:\n symbols[vals[\"name\"]] = vals\n else:\n func_name = get_name_from_string_table(data, st_offset, vals[\"name\"])\n if func_name:\n vals.pop(\"name\")\n vals[\"info\"] = get_symbol_info(vals[\"info\"])\n vals[\"shndx\"] = get_symbol_shndx(vals[\"shndx\"])\n \n if vals[\"info\"] == \"UNDEFINED\" and vals[\"value\"] == 0:\n tmp_name = func_name\n import_name = \"Unknown\"\n if \"@@\" in func_name:\n i = tmp_name.find(\"@@\")\n func_name = tmp_name[:i]\n import_name = tmp_name[i:].strip(\"@@\") \n if import_name not in imports:\n imports[import_name] = {}\n imports[import_name][func_name] = vals\n symbols[func_name] = vals\n \n index += entry_len \n \n return symbols, imports", "def _GetSymbol(atom):\n ks = atom.keys()\n if 'sym' in ks:\n return atom['sym']\n\n for k in ks:\n if k not in PROTECTED_KEYS and isinstance(atom[k], list):\n if len(atom[k]) == 3:\n return k\n\n raise ValueError", "def parse_symbol(line):\n def _parse_elfsymbol(line):\n # First try the standard ELF symbol table encoding.\n match = re.match(r'^(\\S+)\\s(.{7})\\s(\\S+)\\s(\\S+)\\s(.+)$', line)\n if match:\n return ELFSymbol(*match.groups())\n # Failing that, try the bastardised Mach-O symbol table encoding.\n match = re.match(r'^(\\S+)\\s(.{7})\\s(\\S+)\\s(.+)$', line)\n if match:\n return ELFSymbol(match.group(1), match.group(2), match.group(3), '0', match.group(4))\n return None\n\n def _parse_othersymbol(line):\n \"\"\"\n [ 4](sec 3)(fl 0x00)(ty 0)(scl 3) (nx 1) 0x00000000 .bss\n [ 6](sec 1)(fl 0x00)(ty 0)(scl 2) (nx 0) 0x00000000 fred\n\n where the number inside the square brackets is the number of the entry in\n the symbol table, the sec number is the section number, the fl 
value are the\n symbol's flag bits, the ty number is the symbol's type, the scl number is\n the symbol's storage class and the nx value is the number of auxilary\n entries associated with the symbol. The last two fields are the symbol's\n value and its name.\n \"\"\"\n return None\n\n if not line:\n return None\n # Return first successful parsing.\n sym = _parse_elfsymbol(line)\n if sym is not None:\n return sym\n return _parse_othersymbol(line)", "def show_symbol_table(st):\n print(st)\n # Dump the name lists get_*()\n if isinstance(st, symtable.Function):\n for nlist in _NAME_LISTS:\n names = getattr(st, \"get_\"+nlist)()\n if names:\n print(' {} : {!r}'.format(nlist, names))\n # Dump the properties as short names is_global -> global, etc..\n for s in st.get_symbols():\n scope = to_scope_name(s._Symbol__scope)\n props = [scope]\n for p in _NAME_PROPS:\n if getattr(s, \"is_\"+p)():\n props.append(p)\n print(' \"{}\" : {}'.format(s.get_name(), ', '.join(props)))", "def get_symbols(self, type_name):\n return self._symtab[type_name].get_symbols()", "def address(self, symbol):\r\n return self.s_table[symbol]", "def get_symbol(self):\n return []", "def gather_references_orders (gdbval):\n# TODO: Somehow also note speculative references and attributes in\n# general\n vec = gdbval[\"references\"]\n return [int(i[\"referred\"][\"order\"]) for i in vec_iter(vec)]", "def gather_referring_orders (gdbval):\n# TODO: Somehow also note speculative references and attributes in\n# general\n vec = gdbval[\"referring\"]\n return [int(i[\"referring\"][\"order\"]) for i in vec_iter(vec)]", "def symbol_table(self) -> str:\n return self._symbol_table", "def obj(self) -> (Symbol, int, int):\n return (self._symbol, self._start, self._end)", "def symbol(self): \n return self.__symbol", "def cur_symbols(self):\n return self.symbols[-1].keys() + self.global_symbols.keys()", "def __set_symbol_dict(self):\r\n return {0: list(alph) if self.is_case_snstv else list(alph)[:26],\r\n 1: list(dgt),\r\n 2: list(spcl) if self.is_spcl else []}", "def seperate_symbols(func):\n params = []\n vars = []\n for symbol in func.free_symbols:\n if not str(symbol).isidentifier():\n continue # E.g. Indexed objects might print to A[i, j]\n if isinstance(symbol, Parameter):\n params.append(symbol)\n elif isinstance(symbol, Idx):\n # Idx objects are not seen as parameters or vars.\n pass\n elif isinstance(symbol, (MatrixExpr, Expr)):\n vars.append(symbol)\n else:\n raise TypeError('model contains an unknown symbol type, {}'.format(type(symbol)))\n\n for der in func.atoms(sympy.Derivative):\n # Used by jacobians and hessians, where derivatives are treated as\n # Variables. 
This way of writing it is purposefully discriminatory\n # against derivatives wrt variables, since such derivatives should be\n # performed explicitly in the case of jacs/hess, and are treated\n # differently in the case of ODEModels.\n if der.expr in vars and all(isinstance(s, Parameter) for s in der.variables):\n vars.append(der)\n\n params.sort(key=lambda symbol: symbol.name)\n vars.sort(key=lambda symbol: symbol.name)\n return vars, params", "def _key(self):\n return (self.name, self.struct_types, self.struct_values)", "def testSymbolHash(self):\n gScope = pykd.diaLoadPdb( str(target.module.pdb()) )\n symSet = set([ gScope[\"g_structTest\"], gScope[\"EnumWindowsProc1\"], gScope[\"g_structTest\"] ])\n self.assertEqual( 2, len(symSet) )\n self.assertTrue( gScope[\"g_structTest\"] in symSet )\n self.assertFalse( gScope[\"EnumWindowsProc2\"] in symSet )", "def _get_parameters(self):\n return (self.SYMBOL, self.parameterArray())", "def getVisitableNodesNamed(self):\n\n return (\n (\"value\", self.subnode_value),\n (\"dict_arg\", self.subnode_dict_arg),\n (\"key\", self.subnode_key),\n )", "def protocol_names(self):\n\n return tuple([k.name for k in self.query(Protocol).order_by(Protocol.name)])", "def symbols_details(self):\n pass", "def add_symbol_und(self, name):\n label_name = \"symtab_\" + name\n if osarch_is_32_bit():\n self.add_data((\"st_name\", 4, \"strtab_%s - strtab\" % (name)))\n self.add_data((\"st_value\", PlatformVar(\"addr\"), label_name, label_name))\n self.add_data((\"st_size\", PlatformVar(\"addr\"), PlatformVar(\"addr\")))\n self.add_data((\"st_info\", 1, 17))\n self.add_data((\"st_other\", 1, 0))\n self.add_data((\"st_shndx\", 2, 1))\n elif osarch_is_64_bit():\n self.add_data((\"st_name\", 4, \"strtab_%s - strtab\" % (name)))\n self.add_data((\"st_info\", 1, 17))\n self.add_data((\"st_other\", 1, 0))\n self.add_data((\"st_shndx\", 2, 1))\n self.add_data((\"st_value\", PlatformVar(\"addr\"), label_name, label_name))\n self.add_data((\"st_size\", PlatformVar(\"addr\"), PlatformVar(\"addr\")))\n else:\n raise_unknown_address_size()", "def get_symbols(doc, lib):\n\n basename = lib.replace(\".dll\", \"\").lower()\n filename = os.path.join(get_hopper_script_dir(), basename + \".txt\")\n if not os.path.exists(filename):\n doc.log(\"Symbol file not found: %s\" % filename)\n return None\n\n symbols = {}\n with open(filename, \"r\") as fp:\n for i, line in enumerate(fp, 1):\n match = symbol_line.match(line)\n if not match:\n doc.log(\"Skipping line %d: Malformed\" % i)\n continue\n\n ordinal, name = match.group(1), match.group(2)\n if ordinal and name:\n symbols[ordinal] = name\n\n return symbols", "def getVisitableNodesNamed(self):\n\n return (\n (\"dict_arg\", self.subnode_dict_arg),\n (\"value\", self.subnode_value),\n )", "def _DiffVtableComponent(offset, expected_symbol, vtable):\n if offset not in vtable:\n return []\n\n entry = vtable[offset]\n if not entry.names:\n return [hex(entry.value).rstrip('L')]\n\n if expected_symbol not in entry.names:\n return entry.names", "def get_info_in_tuple(self):\r\n return self.key, self.value, self.get_color(), self.size_tree", "def getSymmetries(self, board, pi):\n return [(board, pi), (board[:, ::-1], pi[::-1])]" ]
[ "0.63522923", "0.5892707", "0.5717121", "0.5676088", "0.5654664", "0.5606972", "0.55600977", "0.55475837", "0.5519339", "0.5508528", "0.54735756", "0.54546064", "0.5445889", "0.5444502", "0.53737164", "0.53344953", "0.531329", "0.53073454", "0.52950823", "0.52532065", "0.51968807", "0.5191485", "0.5187899", "0.51663953", "0.51292145", "0.5096961", "0.5095743", "0.5091016", "0.5077221", "0.5044907" ]
0.802892
0
Return pruned candidates containing only flags that are set in gdbval
def bool_attr_list(gdbval, candidates): r = [] for i in candidates: if long (gdbval[i]) != 0: r.append(i) pass pass return r
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bool_attr_list_1(gdbval, candidates):\n\n r = []\n for i in candidates:\n if long (gdbval[i[0]]) != 0:\n r.append(i[1])\n pass\n pass\n return r", "def flags(self):\n flags = self.Flags\n return [x for x in self.FLAGS_VALUES if flags & x]", "def checkallflags(flags_with_values,flags_withoutvalues,cldic):\r\n if len(set(flags_with_values).intersection(set(flags_without_values))) > 0:\r\n print ( \"error some flags appear in two lists of flags, with and without required values:\",set(flags_with_values).intersection(set(flags_without_values)))\r\n printcommandset()\r\n sys.exit(1)\r\n for flag in set(flags_with_values).union(set(flags_withoutvalues)):\r\n if flag not in cldic:\r\n print ( \"error some flag mismatch between strings of flags and dictionary of flags:\",flag)\r\n printcommandset()\r\n sys.exit(1)\r\n return", "def process_flags(self):\n\t\tsflags = []\n\t\tfor attr in dir(self):\n\t\t\tif attr[:3] != \"PF_\":\n\t\t\t\tcontinue\n\t\t\tvalue = getattr(self, attr)\n\t\t\tif value & self.fields[\"flags\"]:\n\t\t\t\tsflags.append(attr)\n\n\t\treturn sflags", "def gn_files(self):\n return set(self._gn_flags.keys())", "def unused_featurevalues():\n\n fvs = FeatureValue.objects.filter(feature__active=True)\n unused_fvs = fvs.filter(languages__isnull=True)\n natlang_only_fvs = fvs.filter(languages__language__natlang=True).exclude(languages__language__natlang=False)\n\n if not natlang_only_fvs:\n # Natlangs had no unique features so return early\n return unused_fvs\n\n # dsd\n decorate = ((fv.id, fv) for fv in set(unused_fvs) | set(natlang_only_fvs))\n sort = sorted(decorate)\n return [fv for (_, fv) in sort]", "def flagSet():\r\n for flag in flags:\r\n if flags[flag]:\r\n return True\r\n return False", "def list_set_flags(self, include=frozenset(), exclude=frozenset()):\n for (name, desc) in self.list_flags():\n if name in exclude:\n continue\n if (name in include) or (desc.value is not desc.default):\n yield (name, desc)", "def box_flags(self, box):\n return [flag for flag in self.flags if flag.box == box]", "def _RemoveUnusedFlags( flags, filename ):\n\n new_flags = []\n\n # When flags come from the compile_commands.json file, the first flag is\n # usually the path to the compiler that should be invoked. 
We want to strip\n # that.\n if not flags[ 0 ].startswith( '-' ):\n flags = flags[ 1: ]\n\n skip = False\n for flag in flags:\n if skip:\n skip = False\n continue\n\n if flag == '-c':\n continue\n\n if flag == '-o':\n skip = True;\n continue\n\n if flag == filename or os.path.realpath( flag ) == filename:\n continue\n\n new_flags.append( flag )\n return new_flags", "def no_flags_set(self):\n # TODO: unit test me\n return not any(\n (\n self.flag_bookmarked,\n self.flag_candidate,\n self.flag_final_causative,\n self.flag_for_validation,\n self.flag_molecular != \"empty\",\n self.flag_visual != \"empty\",\n self.flag_validation != \"empty\",\n self.flag_phenotype_match != \"empty\",\n self.flag_summary != \"empty\",\n )\n )", "def _not_matching(values, sieve):\n return [val for val in values if val not in sieve]", "def prune_gbm_features(schema: Dict):\n gbm_feature_types = ['binary', 'category', 'number']\n pruned_all_of = []\n for cond in schema['items']['allOf']:\n if_type = cond['if']['properties']['type']['const']\n if if_type in gbm_feature_types:\n pruned_all_of += [cond]\n schema['items']['allOf'] = pruned_all_of", "def get_flags(self):\n\n if self.raw.flags not in [0, 1, 2, 3]:\n raise ValueError(\"Invalid raw flags: {}\".format(self.raw.flags))\n\n flags = set()\n\n if (self.raw.flags & 0b010) > 0:\n flags.add(\"DF\")\n\n if (self.raw.flags & 0b001) > 0:\n flags.add(\"MF\")\n\n return frozenset(flags)", "def get_source_candidates(all_data_epigen):\n candids = {s:\n [np.where(np.array(c[1])!=0)[0] for c in mdata[\"test\"] ]\n for s, mdata in all_data_epigen.items()}\n return candids", "def test_removeFlags(self):\n self._flagsTest('removeFlags', b'-FLAGS')", "def flags(self):\n return list(self._flags_generator())", "def resetFlags():\r\n for flag in flags:\r\n flags[flag] = False", "def compute_filtered_features(self, features):\n return [\n feature\n for feature in features\n if \"\".join(feature.qualifiers.get(\"is_edit\", \"false\")) != \"true\"\n ]", "def list_flags(self):\n return self._defs.items()", "def _computesuspendedset(repo):\n suspended = repo.changelog.ancestors(getrevs(repo, 'unstable'))\n return set(r for r in getrevs(repo, 'obsolete') if r in suspended)", "def remove_invalid_flags(flags):\n\n # Validate flag format\n filtered_flags = [flag for flag in flags if is_valid_flag(flag)]\n\n number_of_removed_flags = len(flags) - len(filtered_flags)\n\n if number_of_removed_flags:\n log.failure(\"Removed {} flags with incorrect format.\".format(number_of_removed_flags))\n\n return filtered_flags", "def get_mutations_we_miss(quality_cutoff):\n\n murim_mutations = get_murim_mutations(quality_cutoff)\n my_mutations = get_my_mutations(quality_cutoff)\n\n missing = {}\n for chrpos in murim_mutations:\n if chrpos not in my_mutations:\n missing[chrpos] = True\n return missing", "def get_all_flags(options):\n flags = []\n if options.inputFlag:\n flags.append(try_to_int(options.inputFlag))\n if options.outputFlags:\n for flag in options.outputFlags:\n flags.append(try_to_int(flag))\n return flags", "def prune_option_list(opts, keys):\n opt_d = opt_to_dict(opts)\n for k in keys:\n if k in opt_d:\n del opt_d[k]\n return [k for item in opt_d.iteritems() for k in item]", "def get_flagged(self):\n self.cur.execute(\"SELECT video_ID FROM flags\")\n flagged_IDs = []\n for ID in self.cur.fetchall():\n flagged_IDs.append(ID[0])\n return flagged_IDs", "def core_mask(self):\n mask = []\n for atom in self.atoms:\n if \"shell\" not in atom.atom_type.label:\n mask.append(True)\n else:\n 
mask.append(False)\n return mask", "def get_eval_flag_dict(eval_mode):\n\n # Base dictionary with all flags set to True.\n dict = {}\n for key in EVAL_FLAGS:\n dict[key] = True\n\n # Auto-annotations.\n if eval_mode == \"draft\":\n dict[\"check_locus_tag\"] = False\n dict[\"check_trna\"] = False\n dict[\"import_locus_tag\"] = False\n dict[\"check_id_typo\"] = False\n dict[\"check_host_typo\"] = False\n dict[\"check_author\"] = False\n dict[\"check_description\"] = False\n dict[\"check_coords\"] = False\n\n # Manual annotations.\n elif eval_mode == \"final\":\n dict[\"import_locus_tag\"] = False\n\n # SEA-PHAGES GenBank records.\n elif eval_mode == \"auto\":\n dict[\"check_locus_tag\"] = False\n dict[\"check_description_field\"] = False\n dict[\"check_replace\"] = False\n dict[\"check_trna\"] = False\n dict[\"check_id_typo\"] = False\n dict[\"check_host_typo\"] = False\n dict[\"check_author\"] = False\n dict[\"check_description\"] = False\n dict[\"check_description_tally\"] = False\n dict[\"check_gene\"] = False\n dict[\"check_coords\"] = False\n\n # Non-SEA-PHAGES GenBank records.\n elif eval_mode == \"misc\":\n dict[\"check_locus_tag\"] = False\n # TODO below should probably be True, but it causes problems\n # when checking the current genome, GNM2_001, since these are not 'draft'\n # genomes.\n dict[\"check_replace\"] = False\n dict[\"check_trna\"] = False\n dict[\"check_id_typo\"] = False\n dict[\"check_host_typo\"] = False\n dict[\"check_author\"] = False\n dict[\"check_description\"] = False\n dict[\"check_description_tally\"] = False\n dict[\"check_gene\"] = False\n\n # Custom QC settings. User can select the settings, so it is initialized as\n # a copy of the base eval_mode. The user can provide the\n # customized combination of options.\n elif eval_mode == \"custom\":\n for key in dict.keys():\n prompt = f\"Eval_flag: {key}. {EVAL_FLAGS[key]}\"\n response = basic.ask_yes_no(prompt=prompt, response_attempt=3)\n if response is None:\n print(\"The default setting for this eval_flag will be used.\")\n else:\n dict[key] = response\n\n elif eval_mode == \"base\":\n pass\n else:\n print(\"A valid eval_mode has not been selected.\")\n return dict", "def generateCandidates(self):\n\t\tprint(\"Candidate list:\\n\")\n\t\tkeys = list(self.prune_list.keys())\n\t\ttuple_count = len(keys[0])\n\t\tprune_list = {}\n\t\ttup = []\n\t\tfor v in comb(keys, 2):\n\t\t\ta = set(v[0])\n\t\t\tb = set(v[1])\n\t\t\t\n\t\t\t# If there's as many common element in a & b as one less than tuple_count\n\t\t\tif((len(a & b) == (tuple_count - 1)) and (tuple(a | b) not in tup)):\n\t\t\t\ttup.append(tuple(a | b))\n\t\t\t\tprint(tup[-1])\n\t\t\t\t# Update prune list\n\t\t\t\tcount = self.getSupport(tup[-1])\n\t\t\t\tif(count >= self.support):\n\t\t\t\t\tprune_list[tup[-1]] = count\n\t\treturn prune_list", "def get_our_extra_mutations(quality_cutoff):\n\n murim_mutations = get_murim_mutations(quality_cutoff)\n my_mutations = get_my_mutations(quality_cutoff)\n\n missing = {}\n for chrpos in my_mutations:\n if chrpos not in murim_mutations:\n missing[chrpos] = True\n return missing" ]
[ "0.62069196", "0.58500195", "0.5761298", "0.56516194", "0.54694813", "0.54386127", "0.5421935", "0.5417712", "0.54035455", "0.53730446", "0.53225636", "0.53167844", "0.5297252", "0.529056", "0.528956", "0.5206268", "0.5202388", "0.5171703", "0.51404", "0.51187575", "0.50934035", "0.5082096", "0.5072761", "0.50598204", "0.5057428", "0.50534576", "0.50418806", "0.5031735", "0.5023706", "0.4994039" ]
0.62399405
0
To be made a method. Loads common attributes from symbol base
def load_symtab_base_attrs(self): sym = self.gdbval vis = bool_attr_list (sym, ["in_other_partition", "used_from_other_partition", "force_output", "forced_by_abi", "externally_visible"]) vis.extend(bool_attr_list_1(sym["decl"]["base"], [("asm_written_flag", "asm_written"), ("public_flag", "public")])) vis.extend(bool_attr_list_1(sym["decl"]["decl_common"], [("decl_flag_1", "external"), ("virtual_flag", "virtual"), ("artificial_flag", "artificial")])) vis.extend(bool_attr_list_1(sym["decl"]["decl_with_vis"], [("common_flag", "common"), ("weak_flag", "weak"), ("dllimport_flag", "dll_import"), ("comdat_flag", "comdat"), # ("x_comdat_group", "one_only"), ("visibility_specified", "visibility_specified")])) # cg = sym["decl"]["decl_with_vis"]["comdat_group"] # if (long(cg) != 0): # vis.append("comdat_group:%s" % tree_get_identifier_str (cg)) # pass # sn = sym["decl"]["decl_with_vis"]["section_name"] # if (long(sn) != 0): # vis.append("section_name:%s" & sn["string"]["str"].string()) # pass visnum = long(sym["decl"]["decl_with_vis"]["visibility"]) if visnum != 0: visnames = gdb.parse_and_eval("visibility_types") vistype_str = visnames[visnum].string() vis.append("visibility:%s" % vistype_str) pass resnum = int(sym["resolution"]) if resnum != 0: resnames = gdb.parse_and_eval("ld_plugin_symbol_resolution_names") res_str = resnames[resnum].string() vis.append(res_str) pass self.visibility = vis if sym["address_taken"]: self.address_taken = True pass return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _base_attrs(self, service):\n keys = ['name', 'desc', 'url']\n return {name:getattr(service, name, None) for name in keys}", "def __extract_common_attrs(self, raw_data: Dict) -> None:\n for attr in self.COMMON_ATTRS:\n if attr not in self.ATTRS and attr in raw_data:\n setattr(self, attr, raw_data[attr])", "def load_attribute_data():\n global attr_value_counts, attr_counts, value_counts, \\\n attr_value_ratios, attrs\n\n print \"Loading extraction data...\"\n with open('./data/common_extractions.json') as f:\n place_data = json.loads(f.read())\n for place in place_data:\n for attr in place_data[place]:\n if attr not in attr_value_counts:\n attrs.add(attr)\n attr_value_counts[attr] = {}\n attr_counts[attr] = 0\n for value in place_data[place][attr]:\n c = place_data[place][attr][value]\n value_counts[value] = value_counts.get(value, 0) + c\n attr_counts[attr] += c\n attr_value_counts[attr][value] = \\\n attr_value_counts[attr].get(value, 0) + c\n \n for attr in attrs:\n attr_value_ratios[attr] = {}\n for value in attr_value_counts[attr]:\n attr_value_ratios[attr][value] = float(attr_value_counts[attr][value]) \\\n / attr_counts[attr]", "def inherit_attributes(self, attr_table, base_ext_type):\n base_attrs = base_ext_type.attribute_table.attributedict\n attr_table.inherited.update(base_attrs) # { attr_name }\n attr_table.attributedict.update(base_attrs) # { attr_name : attr_type }", "def _load_attributes(self, force=False):\n # All attributes to be loaded, except _namespaces, which is a special\n # case because it requires additional params in the API query:\n attrs = [self._name, self._project, self._lang, self._base_url,\n self._article_path, self._script_path]\n\n params = {\"action\": \"query\", \"meta\": \"siteinfo\", \"siprop\": \"general\"}\n\n if not self._namespaces or force:\n params[\"siprop\"] += \"|namespaces|namespacealiases\"\n with self._api_lock:\n result = self._api_query(params, no_assert=True)\n self._load_namespaces(result)\n elif all(attrs): # Everything is already specified and we're not told\n return # to force a reload, so do nothing\n else: # We're only loading attributes other than _namespaces\n with self._api_lock:\n result = self._api_query(params, no_assert=True)\n\n res = result[\"query\"][\"general\"]\n self._name = res[\"wikiid\"]\n self._project = res[\"sitename\"].lower()\n self._lang = res[\"lang\"]\n self._base_url = res[\"server\"]\n self._article_path = res[\"articlepath\"]\n self._script_path = res[\"scriptpath\"]", "def load_attrs(self):\n return loads(self.get_attr().GetObject()) or {}", "def load_vso_values():\n from sunpy.net import attrs as a\n\n here = os.path.dirname(os.path.realpath(__file__))\n with open(os.path.join(here, 'data', 'attrs.json')) as attrs_file:\n keyword_info = json.load(attrs_file)\n\n # Now to traverse the saved dict and give them attr keys.\n attrs = {}\n for key, value in keyword_info.items():\n attr = getattr(a, key.capitalize(), None)\n if attr is None:\n attr = getattr(a.vso, key.capitalize())\n attrs[attr] = value\n return attrs", "def read_extras(self, f):\n\n gb = f[\"base state\"]\n for name in gb:\n self.base[name] = Basestate(self.cc_data.grid.ny, ng=self.cc_data.grid.ng)\n self.base[name].d[:] = gb[name]", "def get_implementation_specific_attrs(cls):\n ctypes_version = cls.get_drmaa2_library().drmaa2_get_drmaa_version();\n if cls.implementation_specific_attrs is None:\n cls.implementation_specific_attrs = cls.to_py_dict(\n cls.get_drmaa2_library().uge_vi_impl_spec_get(ctypes_version))\n return 
cls.implementation_specific_attrs", "def _load_symbol_data(self, name):\n if self._state[name] in (True, None): # Skip Symbols already loaded\n return\n\n # Unpack attributes\n attrs = self._state[name]['attrs']\n index, dim, domain, records = [attrs[k] for k in ('index', 'dim',\n 'domain', 'records')]\n\n # Read the data\n self._cache_data(name, index, dim, records)\n\n # If the GAMS method 'sameas' is invoked in a program, the resulting\n # GDX file contains an empty Set named 'SameAs' with domain (*,*). Do\n # not read this\n if name == 'SameAs' and domain == ['*', '*']:\n self._state[name] = None\n self._index[index] = None\n return\n\n domain = self._infer_domain(name, domain,\n self._state[name]['elements'])\n\n # Create an xr.DataArray with the Symbol's data\n self._add_symbol(name, dim, domain, attrs)", "def __getattribute__(self, name):\n if name == \"_update_header\":\n return object.__getattribute__(self, \"_update_header\")\n\n elif name == \"_get_lib\":\n return object.__getattribute__(self, \"_get_lib\")\n\n else:\n # Load symbols from the current process (Python).\n return getattr(self._get_lib(), name)", "def attributes(self):\n ...", "def init_meta(self):\n self.meta = {}\n # Required (core)\n self.meta['ra'] = dict(ext=0, card='RA')\n self.meta['dec'] = dict(ext=0, card='DEC')\n self.meta['target'] = dict(ext=0, card='object')\n self.meta['idname'] = dict(ext=0, card='obsmode')\n self.meta['decker'] = dict(ext=0, card='MASKNAME')\n self.meta['binning'] = dict(card=None, compound=True) # Uses CCDSUM\n self.meta['detector']=dict(ext=0,card='detector')\n self.meta['mjd'] = dict(ext=0, card='MJD-OBS')\n self.meta['exptime'] = dict(ext=0, card='EXPTIME')\n self.meta['airmass'] = dict(ext=0, card='AIRMASS')\n self.meta['dispname'] = dict(ext=0, card='GRISM')\n self.meta['datasec'] = dict(ext=1, card='DATASEC')\n self.meta['dichroic'] = dict(ext=0, card='FILTER1')\n self.meta['instrument'] = dict(ext=0, card='INSTRUME')", "def attributes(self):", "def _init_attributes(self):\n if os.name == \"nt\":\n if \"64\" in platform.architecture()[0]:\n platform_arch = \"x86_64\"\n elif \"32\" in platform.architecture()[0]:\n platform_arch = \"i386\"\n else:\n platform_arch = platform.architecture()\n os_ver = f\"Windows-{platform.win32_ver()[1]}\"\n else:\n platform_arch = platform.machine()\n if platform.system() == \"Darwin\":\n os_ver = f\"macOS-{platform.mac_ver()[0]}\"\n else:\n os_ver = \"-\".join(linux_distribution()[0:2])\n\n license_chunks = LICENSE.split(\" \")\n if license_chunks[0] == \"GPLv2\":\n client_license = \"GPL-2.0\"\n else:\n client_license = \"Commercial\"\n\n default_attributes = {\n # Process id\n \"_pid\": str(os.getpid()),\n # Platform architecture\n \"_platform\": platform_arch,\n # OS version\n \"_os\": os_ver,\n # Hostname of the local machine\n \"_source_host\": socket.gethostname(),\n # Client's name\n \"_client_name\": \"mysql-connector-python\",\n # Client's version\n \"_client_version\": \".\".join([str(x) for x in VERSION[0:3]]),\n # Client's License identifier\n \"_client_license\": client_license,\n }\n self._settings[\"attributes\"].update(default_attributes)\n\n if \"connection-attributes\" in self._settings:\n for attr_name in self._settings[\"connection-attributes\"]:\n attr_value = self._settings[\"connection-attributes\"][attr_name]\n # Validate name type\n if not isinstance(attr_name, str):\n raise InterfaceError(\n f\"Attribute name '{attr_name}' must be a string type\"\n )\n # Validate attribute name limit 32 characters\n if len(attr_name) > 
32:\n raise InterfaceError(\n f\"Attribute name '{attr_name}' exceeds 32 characters \"\n \"limit size\"\n )\n # Validate names in connection-attributes cannot start with \"_\"\n if attr_name.startswith(\"_\"):\n raise InterfaceError(\n \"Key names in 'session-connect-attributes' cannot \"\n f\"start with '_', found: {attr_name}\"\n )\n # Validate value type\n if not isinstance(attr_value, str):\n raise InterfaceError(\n f\"Attribute name '{attr_name}' value '{attr_value}' \"\n \" must be a string type\"\n )\n\n # Validate attribute value limit 1024 characters\n if len(attr_value) > 1024:\n raise InterfaceError(\n f\"Attribute name '{attr_name}' value: '{attr_value}' \"\n \"exceeds 1024 characters limit size\"\n )\n\n self._settings[\"attributes\"][attr_name] = attr_value", "def init_attrs(self):\n raise NotImplementedError", "def load(self):\r\n self.domain.get_attributes(self.name, item=self)", "def init_meta(self):\n self.meta = {}\n # Required (core)\n self.meta['ra'] = dict(ext=0, card='RA')\n self.meta['dec'] = dict(ext=0, card='DEC')\n self.meta['target'] = dict(ext=0, card='object')\n self.meta['idname'] = dict(ext=0, card='obsmode')\n self.meta['decker'] = dict(ext=0, card='MASKNAME')\n self.meta['binning'] = dict(card=None, compound=True)\n self.meta['detector'] = dict(ext=0, card='detector')\n self.meta['mjd'] = dict(ext=0, card='MJD-OBS')\n self.meta['exptime'] = dict(ext=0, card='EXPTIME')\n self.meta['airmass'] = dict(ext=0, card='AIRMASS')\n self.meta['dispname'] = dict(ext=0, card='GRISM')\n self.meta['datasec'] = dict(ext=0, card='DETSIZE')\n self.meta['dichroic'] = dict(ext=0, card='FILTER1')\n self.meta['instrument'] = dict(ext=0, card='INSTRUME')\n self.meta['slitwid'] = dict(card=None, compound=True)", "def _load(self) -> dict:\n raise NotImplementedError()", "def test_all_attributes_in_base_metadata(self):\n\n name = 'idsvc.basemeta'\n uuid = 'ABC'\n value = { 'color': 'blue' }\n owner = 'bob'\n schemaId = None\n internalUsername = None\n associationIds = ['CDE', 'EFG']\n lastUpdated = '2016-06-15T17:09:06.137-05:00'\n name = 'idsvc.basemeta'\n created = '2016-06-15T17:09:06.137-05:00'\n _links = ''\n\n meta = {\n 'name': name,\n 'uuid': uuid,\n 'value': value,\n 'owner': owner,\n 'schemaId': schemaId,\n 'internalUsername': internalUsername,\n 'associationIds': associationIds,\n 'lastUpdated': lastUpdated,\n 'name': name,\n 'created': created,\n '_links': _links\n }\n\n base_meta = BaseMetadata(api_client=self.IDS_SYS_CLIENT, meta=meta)\n self.assertDictEqual(base_meta.meta, meta)", "def init_meta(self):\n self.meta = {}\n # Required (core)\n self.meta['ra'] = dict(ext=0, card='RA')\n self.meta['dec'] = dict(ext=0, card='DEC')\n self.meta['target'] = dict(ext=0, card='OBJECT')\n self.meta['decker'] = dict(ext=0, card=None, default='default')\n self.meta['dichroic'] = dict(ext=0, card=None, default='default')\n self.meta['binning'] = dict(ext=0, card=None, default='1,1')\n\n self.meta['mjd'] = dict(ext=0, card='ACQTIME')\n self.meta['exptime'] = dict(ext=0, card='EXPTIME')\n self.meta['airmass'] = dict(ext=0, card='AIRMASS')\n # Extras for config and frametyping\n self.meta['dispname'] = dict(ext=0, card='GRISM')\n self.meta['idname'] = dict(ext=0, card='OBSTYPE')\n self.meta['instrument'] = dict(ext=0, card='INSTRUME')", "def get_attributes(self) -> Dict[str, str]:\n pass", "def _get_attrs_symbols():\n return {\n 'True', 'False', 'None', # those are identifiers in Python 2.7\n 'self',\n 'parent',\n 'id',\n 'uid',\n 'context',\n 'context_today',\n 'active_id',\n 
'active_ids',\n 'allowed_company_ids',\n 'current_company_id',\n 'active_model',\n 'time',\n 'datetime',\n 'relativedelta',\n 'current_date',\n 'abs',\n 'len',\n 'bool',\n 'float',\n 'str',\n 'unicode',\n }", "def load_attributes():\n\n # <attribute_id> <attribute_name>\n attributes_file = open(PROJECT_ROOT +'/data/attributes.txt').readlines()\n attributes_file = [i.strip().split() for i in attributes_file]\n\n # <certainty_id> <certainty_name>\n certainties_file = open(PROJECT_ROOT +'/data/CUB_200_2011/attributes/certainties.txt').readlines()\n certainties_file = [i.strip().split() for i in certainties_file]\n\n # <image_id> <attribute_id> <is_present> <certainty_id> <time>\n labels_file = open(PROJECT_ROOT +'/data/CUB_200_2011/attributes/image_attribute_labels.txt').readlines()\n labels_file = [i.strip().split() for i in labels_file]\n\n attribute_ids = {}\n for i in attributes_file:\n attribute_ids[i[1]] = int(i[0])\n\n certainty_ids = {}\n for i in certainties_file:\n certainty_ids[i[1]] = int(i[0])\n\n label_ids = {}\n for i in labels_file:\n label_ids[(int(i[0]), int(i[1]))] = list(map(lambda x:int(float(x)), i[2:]))\n\n return attribute_ids, certainty_ids, labels_file, label_ids", "def load_constants():\r\n marker_dictionary = dict()\r\n marker_dictionary[\"SP\"] = SP\r\n marker_dictionary[\"LCL\"] = LCL\r\n marker_dictionary[\"ARG\"] = ARG\r\n marker_dictionary[\"THIS\"] = THIS\r\n marker_dictionary[\"THAT\"] = THAT\r\n marker_dictionary[\"SCREEN\"] = SCREEN\r\n marker_dictionary[\"KBD\"] = KBD\r\n for i in range(0, RAM_RESERVE_END):\r\n marker_dictionary[\"R\"+str(i)] = i\r\n return marker_dictionary", "def build_attrs(self, base_attrs, extra_attrs=None, **kwargs):\n attrs = dict(base_attrs, **kwargs)\n if extra_attrs:\n attrs.update(extra_attrs)\n return attrs", "def getInitParams(self):\n paramDict = super().getInitParams()\n paramDict['base'] = self.base\n return paramDict", "def _extract_attributes(modpath, respect_all=True):\r\n try:\r\n if six.PY2:\r\n with open(modpath, 'r') as file:\r\n source = file.read()\r\n else:\r\n with open(modpath, 'r', encoding='utf8') as file:\r\n source = file.read()\r\n except Exception as ex: # nocover\r\n raise IOError('Error reading {}, caused by {}'.format(modpath, repr(ex)))\r\n valid_attrs = None\r\n if respect_all: # pragma: nobranch\r\n try:\r\n valid_attrs = static.parse_static_value('__all__', source)\r\n except NameError:\r\n pass\r\n if valid_attrs is None:\r\n # The __all__ variable is not specified or we dont care\r\n try:\r\n top_level = TopLevelVisitor.parse(source)\r\n except SyntaxError as ex:\r\n msg = 'modpath={} has bad syntax: {}'.format(modpath, ex)\r\n raise SyntaxError(msg)\r\n attrnames = top_level.attrnames\r\n # list of names we wont export by default\r\n invalid_callnames = dir(builtins)\r\n valid_attrs = []\r\n for attr in attrnames:\r\n if attr.startswith('_'):\r\n continue\r\n if attr in invalid_callnames: # nocover\r\n continue\r\n valid_attrs.append(attr)\r\n return valid_attrs", "def _load_symbol(self, index):\n # Load basic information\n name, dim, type_code = self._api.symbol_info(index)\n n_records, vartype, desc = self._api.symbol_info_x(index)\n\n self._index[index] = name # Record the name\n\n attrs = {\n 'index': index,\n 'name': name,\n 'dim': dim,\n 'type_code': type_code,\n 'records': n_records,\n 'vartype': vartype,\n 'description': desc,\n }\n\n # Assemble a string description of the Symbol's type\n type_str_ = type_str[type_code]\n if type_code == gdxcc.GMS_DT_PAR and dim == 0:\n type_str_ = 
'scalar'\n try:\n vartype_str_ = vartype_str[vartype]\n except KeyError: # pragma: no cover\n # Some other vartype is returned that's not described by the GDX\n # API docs\n vartype_str_ = ''\n attrs['type_str'] = '{} {}'.format(vartype_str_, type_str_)\n\n debug(str('Loading #{index} {name}: {dim}-D, {records} records, '\n u'\"{description}\"').format(**attrs))\n\n # Equations and Aliases require limited processing\n if type_code == gdxcc.GMS_DT_EQU:\n info('Loading of GMS_DT_EQU not implemented: {} {} not loaded.'.\n format(index, name))\n self._state[name] = None\n return name, type_code\n elif type_code == gdxcc.GMS_DT_ALIAS:\n parent = desc.replace('Aliased with ', '')\n self._alias[name] = parent\n assert self[parent].attrs['_gdx_type_code'] == gdxcc.GMS_DT_SET\n # Duplicate the variable\n self._variables[name] = self._variables[parent]\n self._state[name] = True\n super(File, self).set_coords(name, inplace=True)\n return name, type_code\n\n # The Symbol is either a Set, Parameter or Variable\n try: # Read the domain, as a list of names\n domain = self._api.symbol_get_domain_x(index)\n debug('domain: {}'.format(domain))\n except Exception: # gdxSymbolGetDomainX fails for the universal set\n assert name == '*'\n domain = []\n\n # Cache the attributes\n attrs['domain'] = domain\n self._state[name] = {'attrs': attrs}\n\n return name, type_code", "def __init__(self, node, declare):\n symbol.__init__(self, node, declare, \"general\", \"General\")\n # get the stuff that must be present\n self.subtype = getTag(node, \"subtype\")\n # check if this is a symbol we already know of\n self.subsym = declare.checkLookup(self.subtype)\n # is there a size\n self.size = getOptionalTag(node, \"size\", \"0\")\n self.size = int(self.size)\n if self.size != 0:\n self.subsym = None" ]
[ "0.61585546", "0.6084663", "0.6044661", "0.5943379", "0.5886491", "0.5802792", "0.57020926", "0.5637599", "0.56372637", "0.56197745", "0.56000906", "0.55648845", "0.5503462", "0.5496226", "0.5459958", "0.5452533", "0.54436445", "0.5441584", "0.54250926", "0.540444", "0.5394743", "0.53632957", "0.5361788", "0.5352002", "0.5349226", "0.5338503", "0.53308356", "0.53302443", "0.5311954", "0.5304827" ]
0.6953629
0
Return orders of nodes ipa_ref_list references
def gather_references_orders (gdbval):
# TODO: Somehow also note speculative references and attributes in
# general
    vec = gdbval["references"]
    return [int(i["referred"]["order"]) for i in vec_iter(vec)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gather_referring_orders (gdbval):\n# TODO: Somehow also note speculative references and attributes in\n# general\n vec = gdbval[\"referring\"]\n return [int(i[\"referring\"][\"order\"]) for i in vec_iter(vec)]", "def angle_sort_adjacent_nodes(self,n,ref_nbr=None):\n nbrs=self.node_to_nodes(n)\n if len(nbrs)==0:\n return []\n diffs=self.nodes['x'][nbrs] - self.nodes['x'][n]\n angles=np.arctan2(diffs[:,1],diffs[:,0])\n nbrs=nbrs[np.argsort(angles)]\n if ref_nbr is not None: \n i=list(nbrs).index(ref_nbr)\n nbrs=np.roll(nbrs,-i)\n return nbrs", "def _sorting(self, notsorted_list, predecessors):\n remaining_nodes = []\n sorted_part = []\n for nd in notsorted_list:\n if not predecessors[nd.name]:\n sorted_part.append(nd)\n else:\n remaining_nodes.append(nd)\n return sorted_part, remaining_nodes", "def dmc_order(self):\n return sorted(self.lookup_table, key=lambda clr: int(clr.id) if clr.id.isdigit() else 0)", "def reference_nodes_idx(self) -> Dict[str, torch.Tensor]:\n return self.node_idx_references", "def nodes_in_topological_order(self):\n if not self.sorted:\n self._topological_sort()\n return self._topological_order", "def _sort_ns(self):\n n = []\n for layer in self.structure:\n n.append(layer.get_index())\n n = np.asarray(n)\n return n", "def rootSetOrder (self):\n order = []\n nodes = set(self.__nodes)\n edge_map = {}\n for (d, srcs) in six.iteritems(self.__edgeMap):\n edge_map[d] = srcs.copy()\n while nodes:\n freeset = set()\n for n in nodes:\n if not (n in edge_map):\n freeset.add(n)\n if 0 == len(freeset):\n _log.error('dependency cycle in named components')\n return None\n order.append(freeset)\n nodes.difference_update(freeset)\n new_edge_map = {}\n for (d, srcs) in six.iteritems(edge_map):\n srcs.difference_update(freeset)\n if 0 != len(srcs):\n new_edge_map[d] = srcs\n edge_map = new_edge_map\n return order", "def toposorted(self):\n order = []\n colors = {node: \"white\" for node in self._neighbors}\n\n def visit(node):\n assert colors[node] == \"white\"\n colors[node] = \"gray\"\n for neighbor in self._neighbors[node]:\n if colors[neighbor] == \"white\":\n visit(neighbor)\n elif colors[neighbor] == \"gray\":\n raise CyclicGraphError(\n \"Cycle involving {!r} and {!r} detected\".format(node, neighbor)\n )\n order.append(node)\n colors[node] = \"black\"\n\n for node in self._neighbors:\n if colors[node] == \"white\":\n visit(node)\n return order", "def references(self):\n ref_nodes = self.root.xpath(\".//bib-reference\")\n return list(\n itertools.chain.from_iterable(\n self.get_reference_iter(node) for node in ref_nodes\n )\n )", "def referencing_nodes(self):\n\n return self._referencing_nodes", "def _refind_nodes(self, reSearchItems, root=None, sortByDepth=False):\n\n reListOfSearchItems = list(reSearchItems)\n\n if root == None:\n ReParent = reListOfSearchItems.pop(0)\n Out = [x for x in self.ParentMap.keys() if ReParent.match(x.tag)]\n\n else:\n Out = [root]\n\n\n while len(reListOfSearchItems) > 0:\n ReParent = reListOfSearchItems.pop(0)\n Out = [x for root in Out for x in root.iter() if ReParent.match(x.tag)]\n\n if sortByDepth == False: return Out\n\n TDict = dict((x, len(self.get_path_to_node(x))) for x in Out)\n return [o[0] for o in sorted(TDict.items(),key=lambda x:x[1])]", "def sorted_nodes_list(self):\r\n full_sorted_node_list = map(lambda k: k[0], sorted(self.graph.degree(),\r\n key=lambda k: k[1], reverse=True))\r\n return full_sorted_node_list", "def list_of_all_nodes_pointing_to_ref( s, ref ):\n results = []\n for node in s.nodes.values():\n nodetype = 
sortof_type_str_of(node) \n if nodetype == \"LIST\":\n if ref in node:\n results.append(node)\n elif nodetype == \"DICT\":\n if ref in node.values():\n results.append(node)\n return results", "def order(self):\n return len(self._nodes)", "def order(self, searcher, docnums, reverse = False):\n return docnums", "def reorder(name_list, ref_list):\n result = []\n # Use the ordering in ref_list, to reassemble the name list:\n for name in ref_list:\n # These always come at the end\n if name in ['FTP', 'RSYNC']:\n continue\n if name in name_list:\n result.append(name)\n # For any that were not in the reference list and are left over, tack\n # them on to the end:\n for name in name_list:\n if name not in ref_list:\n result.append(name)\n # Finally, add these, so they are at the very end\n for name in ref_list:\n if name in name_list and name in ['FTP', 'RSYNC']:\n result.append(name)\n \n # Make sure I have the same number I started with\n assert(len(name_list) == len(result))\n return result", "def list_refs(self):\n pass", "def get_ordered_ids(tree):\n ordered_ids = []\n ordered_ids.extend(id(node) for node in tree.gen_tips())\n ordered_ids.extend(id(node) for node in tree.gen_internal_nodes())\n return ordered_ids", "def get_reference_node_parents(ref):\n parents = []\n return parents", "def sequence_elements(self):\n seq_model = self.opt_model.seq_model\n self.elements.sort(key=lambda e:\n seq_model.ifcs.index(e.reference_interface()))", "def list_ref_keys(self):\n print('=======')\n print('REFs')\n print('=======')\n for key in self.refs:\n print(key)", "def dependents(sent,head): # head: node address\n return sorted(chain.from_iterable(sent.nodes[head]\\\n ['deps'].values()))", "def reorder_links(self, previous_node, links): # pragma: no cover\n\t\treturn links", "def find_topo_sort(node_list):\r\n visited = set()\r\n topo_order = []\r\n #print(node_list)\r\n for node in node_list:\r\n topo_sort_dfs(node, visited, topo_order)\r\n return topo_order", "def degeneracy_ordering(self):\n return sorted(self._adjacency_map, key=self.degree)", "def depends_on(self, node):\n return sorted(self.__edge_map[node], key=node_key)", "def get_ordered_nodes(self):\n nodes = []\n self.build_nodes_list(self.root, nodes)\n return nodes", "def sorted_nodes(self):\n if self._sorted_nodes is None:\n self.sorting()\n return self._sorted_nodes", "def toposort(adj):\n # Memoize for visited vertex\n used = [0] * len(adj)\n order = []\n # write your code here\n # Traverse through each vertex\n for i in range(len(adj)):\n if not used[i]:\n # If not visited, run dfs\n dfs(adj, used, order, i)\n\n # Reverse the order list to show in descending order\n order.reverse()\n return order" ]
[ "0.731949", "0.6546845", "0.6098026", "0.6095413", "0.6079038", "0.5992602", "0.5936781", "0.59094507", "0.58531016", "0.58507776", "0.58364546", "0.58358586", "0.58252954", "0.58145785", "0.5806854", "0.5802949", "0.5786999", "0.5780388", "0.5758174", "0.5740272", "0.57386464", "0.5736634", "0.5711007", "0.5710025", "0.568748", "0.56833094", "0.56813157", "0.5680931", "0.56746274", "0.5652924" ]
0.79086727
0
Return orders of nodes referring node associated with ipa_ref_list
def gather_referring_orders (gdbval):
# TODO: Somehow also note speculative references and attributes in
# general
    vec = gdbval["referring"]
    return [int(i["referring"]["order"]) for i in vec_iter(vec)]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def gather_references_orders (gdbval):\n# TODO: Somehow also note speculative references and attributes in\n# general\n vec = gdbval[\"references\"]\n return [int(i[\"referred\"][\"order\"]) for i in vec_iter(vec)]", "def angle_sort_adjacent_nodes(self,n,ref_nbr=None):\n nbrs=self.node_to_nodes(n)\n if len(nbrs)==0:\n return []\n diffs=self.nodes['x'][nbrs] - self.nodes['x'][n]\n angles=np.arctan2(diffs[:,1],diffs[:,0])\n nbrs=nbrs[np.argsort(angles)]\n if ref_nbr is not None: \n i=list(nbrs).index(ref_nbr)\n nbrs=np.roll(nbrs,-i)\n return nbrs", "def reference_nodes_idx(self) -> Dict[str, torch.Tensor]:\n return self.node_idx_references", "def referencing_nodes(self):\n\n return self._referencing_nodes", "def get_reference_node_parents(ref):\n parents = []\n return parents", "def nodes_in_topological_order(self):\n if not self.sorted:\n self._topological_sort()\n return self._topological_order", "def list_of_all_nodes_pointing_to_ref( s, ref ):\n results = []\n for node in s.nodes.values():\n nodetype = sortof_type_str_of(node) \n if nodetype == \"LIST\":\n if ref in node:\n results.append(node)\n elif nodetype == \"DICT\":\n if ref in node.values():\n results.append(node)\n return results", "def _sorting(self, notsorted_list, predecessors):\n remaining_nodes = []\n sorted_part = []\n for nd in notsorted_list:\n if not predecessors[nd.name]:\n sorted_part.append(nd)\n else:\n remaining_nodes.append(nd)\n return sorted_part, remaining_nodes", "def find_topo_sort(node_list):\r\n visited = set()\r\n topo_order = []\r\n #print(node_list)\r\n for node in node_list:\r\n topo_sort_dfs(node, visited, topo_order)\r\n return topo_order", "def _refind_nodes(self, reSearchItems, root=None, sortByDepth=False):\n\n reListOfSearchItems = list(reSearchItems)\n\n if root == None:\n ReParent = reListOfSearchItems.pop(0)\n Out = [x for x in self.ParentMap.keys() if ReParent.match(x.tag)]\n\n else:\n Out = [root]\n\n\n while len(reListOfSearchItems) > 0:\n ReParent = reListOfSearchItems.pop(0)\n Out = [x for root in Out for x in root.iter() if ReParent.match(x.tag)]\n\n if sortByDepth == False: return Out\n\n TDict = dict((x, len(self.get_path_to_node(x))) for x in Out)\n return [o[0] for o in sorted(TDict.items(),key=lambda x:x[1])]", "def order(self):\n return len(self._nodes)", "def get_compute_order(graph_def, input_node_name='', input_node_size=None):\n name_to_node = parse_graph_nodes(graph_def)\n node_info = collections.defaultdict(_node_info)\n for each in graph_def.node:\n _get_computed_nodes(name_to_node, each.name, node_info, input_node_name,\n input_node_size)\n return node_info, name_to_node", "def find_topo_sort(node_list):\n visited = set()\n topo_order = []\n for node in node_list:\n topo_sort_dfs(node, visited, topo_order)\n return topo_order", "def find_topo_sort(node_list):\n visited = set()\n topo_order = []\n for node in node_list:\n topo_sort_dfs(node, visited, topo_order)\n return topo_order", "def sort_nodes_by_n_reachable(graph):\n list_of_node_and_reachables_tups = [] # stores the number of reachable nodes per node\n # The following for loop finds the number of reachable nodes per node\n for node_to_test in graph.nodes:\n n_reachable = 0\n # The following for loop checks each node if it is reachable from node_to_test. 
If so, adds to the counter\n for node_is_reachable in graph.nodes:\n if graph.is_reachable(node_to_test, node_is_reachable) and node_to_test != node_is_reachable:\n n_reachable += 1\n # Adds a tuple with the node_to_test and the counter of reachable nodes\n list_of_node_and_reachables_tups.append((node_to_test, n_reachable))\n # At this point we have a list with tuples including the node name and its reachables. Now need to sort them\n sorted_nodes_by_reachable = sorted(list_of_node_and_reachables_tups, key=lambda tup: tup[1], reverse=True)\n return sorted_nodes_by_reachable", "def toposort(adj):\n # Memoize for visited vertex\n used = [0] * len(adj)\n order = []\n # write your code here\n # Traverse through each vertex\n for i in range(len(adj)):\n if not used[i]:\n # If not visited, run dfs\n dfs(adj, used, order, i)\n\n # Reverse the order list to show in descending order\n order.reverse()\n return order", "def findTopologicalOrder(self):\n # This implementation temporarily messes with reverse stars, must fix at end\n numOrderedNodes = 0\n while numOrderedNodes < self.numNodes:\n nextNode = self.findLeastEnteringLinks()\n if len(self.node[nextNode].reverseStar) > 0:\n print(\"Error: Network given to findTopologicalOrder contains a cycle.\")\n raise BadNetworkOperationException\n numOrderedNodes += 1\n self.node[nextNode].order = numOrderedNodes\n self.node[nextNode].reverseStar = [0] * self.numLinks\n for ij in self.node[nextNode].forwardStar:\n self.node[self.link[ij].head].reverseStar.remove(ij)\n \n # Repopulate reverse star list\n for i in self.node:\n self.node[i].reverseStar = list()\n for ij in self.link:\n self.node[self.link[ij].head].reverseStar.append(ij)", "def depends_on(self, node):\n return sorted(self.__edge_map[node], key=node_key)", "def dmc_order(self):\n return sorted(self.lookup_table, key=lambda clr: int(clr.id) if clr.id.isdigit() else 0)", "def reference_nodes_graph_idx(self) -> Dict[str, torch.Tensor]:\n return self.node_graph_idx_reference", "def toposorted(self):\n order = []\n colors = {node: \"white\" for node in self._neighbors}\n\n def visit(node):\n assert colors[node] == \"white\"\n colors[node] = \"gray\"\n for neighbor in self._neighbors[node]:\n if colors[neighbor] == \"white\":\n visit(neighbor)\n elif colors[neighbor] == \"gray\":\n raise CyclicGraphError(\n \"Cycle involving {!r} and {!r} detected\".format(node, neighbor)\n )\n order.append(node)\n colors[node] = \"black\"\n\n for node in self._neighbors:\n if colors[node] == \"white\":\n visit(node)\n return order", "def referenced_nodes(self):\n return self._referenced_nodes", "def rootSetOrder (self):\n order = []\n nodes = set(self.__nodes)\n edge_map = {}\n for (d, srcs) in six.iteritems(self.__edgeMap):\n edge_map[d] = srcs.copy()\n while nodes:\n freeset = set()\n for n in nodes:\n if not (n in edge_map):\n freeset.add(n)\n if 0 == len(freeset):\n _log.error('dependency cycle in named components')\n return None\n order.append(freeset)\n nodes.difference_update(freeset)\n new_edge_map = {}\n for (d, srcs) in six.iteritems(edge_map):\n srcs.difference_update(freeset)\n if 0 != len(srcs):\n new_edge_map[d] = srcs\n edge_map = new_edge_map\n return order", "def sorted_nodes_list(self):\r\n full_sorted_node_list = map(lambda k: k[0], sorted(self.graph.degree(),\r\n key=lambda k: k[1], reverse=True))\r\n return full_sorted_node_list", "def references(self):\n ref_nodes = self.root.xpath(\".//bib-reference\")\n return list(\n itertools.chain.from_iterable(\n self.get_reference_iter(node) for node 
in ref_nodes\n )\n )", "def targeted_order(ugraph):\n # copy the graph\n new_graph = copy_graph(ugraph)\n \n counter = 0\n order = [] \n while len(new_graph) > 0:\n max_degree = -1\n for node in new_graph:\n counter += 1\n if len(new_graph[node]) > max_degree:\n max_degree = len(new_graph[node])\n max_degree_node = node\n \n \n neighbors = new_graph[max_degree_node]\n new_graph.pop(max_degree_node)\n for neighbor in neighbors:\n counter += 1\n new_graph[neighbor].remove(max_degree_node)\n\n order.append(max_degree_node)\n return order # uncomment to use graph_resilience_targeted_order", "def list_ref_keys(self):\n print('=======')\n print('REFs')\n print('=======')\n for key in self.refs:\n print(key)", "def _topological_sort(self):\n\n visited = defaultdict(bool)\n stack = []\n\n for pod in self.pods:\n if not visited[pod]:\n self._topological_sort_pod(pod, visited, stack)\n\n return stack[::-1]", "def reorder(name_list, ref_list):\n result = []\n # Use the ordering in ref_list, to reassemble the name list:\n for name in ref_list:\n # These always come at the end\n if name in ['FTP', 'RSYNC']:\n continue\n if name in name_list:\n result.append(name)\n # For any that were not in the reference list and are left over, tack\n # them on to the end:\n for name in name_list:\n if name not in ref_list:\n result.append(name)\n # Finally, add these, so they are at the very end\n for name in ref_list:\n if name in name_list and name in ['FTP', 'RSYNC']:\n result.append(name)\n \n # Make sure I have the same number I started with\n assert(len(name_list) == len(result))\n return result", "def dependents(sent,head): # head: node address\n return sorted(chain.from_iterable(sent.nodes[head]\\\n ['deps'].values()))" ]
[ "0.7483979", "0.6498939", "0.60103464", "0.59589404", "0.592434", "0.58175755", "0.57886195", "0.57791865", "0.5778693", "0.5724725", "0.5648295", "0.56302035", "0.56052834", "0.56052834", "0.55912304", "0.55762887", "0.55696046", "0.55564326", "0.5543322", "0.55140626", "0.5509287", "0.5503459", "0.550083", "0.5495144", "0.54365057", "0.54300624", "0.54293966", "0.54293627", "0.5421236", "0.5412274" ]
0.70957726
1
Build and return our representation of the symbol table
def build_gdb_symbol_table():
    tab = Symtab()
    n = gdb.parse_and_eval ("symtab->nodes")
    while (long(n)):
        if symtab_node_is_function (n):
            current_symbol = GdbFunction(tab, n)
            tab.all_functions.append (current_symbol)
        elif symtab_node_is_variable (n):
            current_symbol = GdbVariable(tab, n)
            tab.all_variables.append (current_symbol)
        else:
            raise gdb.GdbError ("Encountered an unknown symbol table node");
        tab.order_to_sym[current_symbol.order] = current_symbol
        tab.all_symbols.append (current_symbol)
        n = n["next"]
        pass
    tab.fixup()
    return tab
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __str__(self):\n dictt = self.getFullDict()\n return \"SymbolTable(\\n{}\\n)\".format(pprint.pformat(dictt))", "def symbol_table(self) -> str:\n return self._symbol_table", "def getSymbolTable(self) -> ghidra.app.util.bin.format.pe.debug.DebugCodeViewSymbolTable:\n ...", "def __init__(self, DEBUG=False):\n self.DEBUG = DEBUG\n\n self.classTable = {}\n self.subroutineTable = {}\n\n self.counts = {}\n self.counts[\"STATIC\"] = 0\n self.counts[\"FIELD\"] = 0\n self.counts[\"ARG\"] = 0\n self.counts[\"VAR\"] = 0\n\n if self.DEBUG:\n print(\"DEBUG(SymbolTable): INITIALIZED SYMBOL TABLES\")", "def generate_symbol_struct(mode, symbols, definition):\n if \"vanilla\" == mode:\n return \"\"\n definitions = []\n hashes = []\n symbol_table_content = \"\"\n for ii in symbols:\n definitions += [\" %s;\" % (ii.generate_definition())]\n hashes += [\" %s%s,\" % (ii.generate_prototype(), ii.get_hash())]\n if \"dlfcn\" != mode:\n symbol_table_content = \" =\\n{\\n%s\\n}\" % (\"\\n\".join(hashes))\n return template_symbol_table % (definition, \"\\n\".join(definitions), symbol_table_content)", "def __str__(self) -> str:\n\n return self._format_symbol_table_content(\"Symbol table\", self._symbols.items())", "def generate_table(self):\n states = self.get_canonical_collection()\n # self.print_canonical_collection(states)\n table = [{} for _ in range(len(states))]\n\n for index in range(len(states)):\n state = states[index]\n first_rule_cnt = 0\n second_rule_cnt = 0\n third_rule_cnt = 0\n beta = []\n for prod in state:\n dot_index = prod[1].index('.')\n alpha = prod[1][:dot_index]\n beta = prod[1][dot_index + 1:]\n if len(beta) != 0:\n first_rule_cnt += 1\n else:\n if prod[0] != 'S1':\n second_rule_cnt += 1\n production_index = self.grammar.P.index((prod[0], alpha))\n elif alpha == [self.grammar.S[0]]:\n third_rule_cnt += 1\n if first_rule_cnt == len(state):\n table[index]['action'] = 'shift'\n\n elif second_rule_cnt == len(state):\n table[index]['action'] = 'reduce ' + str(production_index)\n\n elif third_rule_cnt == len(state):\n table[index]['action'] = 'acc'\n else:\n conflict_msg = 'Conflict! 
State I' + str(index) + ': ' + str(state) + '\\nSymbol: ' + beta[0]\n raise (Exception(conflict_msg))\n for symbol in self.grammar.N + self.grammar.E: # the goto part of the table\n next_state = self.go_to(state, symbol)\n if next_state in states:\n table[index][symbol] = states.index(next_state)\n # print(\"table\", table)\n return table", "def __init__(self):\r\n self.s_table = SymbolTable.preSymbols", "def __generate_symboltable(self, code):\n\n code_without_lables = []\n address = 0\n for line in code:\n label_code = line.split(':')\n label = label_code[0]\n if len(label) != len(line):\n self.__symboltable[label] = address\n address += REG_SIZE\n instruction = label_code.pop().strip()\n code_without_lables = code_without_lables + [instruction]\n else:\n instruction = label_code.pop().strip()\n code_without_lables = code_without_lables + [instruction]\n\n tokens = instruction.split(' ')\n asm_directive = tokens[0]\n if tokens[0] in AssemblerDirectives.to_string():\n if asm_directive == AssemblerDirectives.ORG.name:\n address = int(tokens[1])\n else:\n address += REG_SIZE\n\n return code_without_lables", "def __init__(self, epsilon=EPSILON):\n cdef bytes name = 'SymbolTable<{0}>'.format(id(self)).encode('ascii')\n self.table = new sym.SymbolTable(<string> name)\n assert (self[epsilon] == EPSILON_ID)", "def create_symbol_table(root):\n\n set_depth(root, 0)\n #Initialize the stack, with the AST root\n stack = Stack(root)\n\n #the symbol table maps the name to the scope.\n #Any node can belong to multiple scopes, therefore this\n #is a list of scope\n symbol_table = STable()\n \n #this represents objects imported from\n #other modules\n other_modules = {}\n\n for node, children, ntype in stack:\n\n if ntype == \"Import\":\n #Import object has names prop which\n #is an array of names\n for name in node.names:\n #name can be the name or an alias \n name_val = name.asname or name.name\n #insert in symbol_table \n symbol_table[name_val] = ()\n\n elif ntype == \"ImportFrom\":\n if node.names[0].name == '*':\n try:\n imp_mod = importlib.import_module(node.module)\n #Add all names in imported module, except those\n #starting with '_'\n for name in dir(imp_mod):\n if name[0] != '_':\n symbol_table[name] = stack_top(scopes)\n\n except ImportError:\n print \"Error: local system does not have {}. 
Skipping!\".format(node.module)\n pass\n else:\n #TODO: store node.module\n for name in node.names:\n #TODO: store name.name even if name.asname defined \n name_val = name.asname or name.name\n symbol_table[name_val] = stack.get_scopes(src_module=node.module)\n\n elif ntype == \"ClassDef\" or ntype == \"FunctionDef\": \n symbol_table[node.name] = stack.get_scopes()\n \n #NOTE: if a name is being loaded then it already exists and doesn't need\n #to be added to symbol_table\n elif ntype == \"Name\" and not is_load(children) and not has_global(stack.scope_tail(), node.id): \n symbol_table[node.id] = stack.get_scopes()\n\n elif ntype == \"arguments\":\n if node.vararg: \n symbol_table[node.vararg] = stack.get_scopes()\n if node.kwarg:\n symbol_table[node.kwarg] = stack.get_scopes()\n\n elif ntype == \"Global\":\n #add a list global vars on node on the top of \n #the stack\n #nonlocal could be handled in similar way\n set_globals(scopes[-1], node.names)\n\n #set lineno property of children nodes\n set_lineno(node, children)\n\n for child in children[::-1]:\n #set depth of child\n set_depth(child, node.depth + 1)\n #Add children to stack\n stack.append(child)\n\n #Add any new scopes\n #Need to do it here since scoping_nodes are defined in their parent scope\n stack.check_and_push_scope()\n\n print \"Symbol table is \"\n print symbol_table\n return symbol_table", "def show_symbol_table(st):\n print(st)\n # Dump the name lists get_*()\n if isinstance(st, symtable.Function):\n for nlist in _NAME_LISTS:\n names = getattr(st, \"get_\"+nlist)()\n if names:\n print(' {} : {!r}'.format(nlist, names))\n # Dump the properties as short names is_global -> global, etc..\n for s in st.get_symbols():\n scope = to_scope_name(s._Symbol__scope)\n props = [scope]\n for p in _NAME_PROPS:\n if getattr(s, \"is_\"+p)():\n props.append(p)\n print(' \"{}\" : {}'.format(s.get_name(), ', '.join(props)))", "def symbol_table(self, value: str):\n self._symbol_table = value", "def parse_symbol_table(data, sections, elf_header):\n if is64bit(elf_header):\n symbol_entry_str = symbol_64_entry_str\n symbol_entry_spec = symbol_64_entry_spec\n else:\n symbol_entry_str = symbol_32_entry_str\n symbol_entry_spec = symbol_32_entry_spec\n entry_len = struct.calcsize(symbol_entry_str)\n \n st_offset = None\n if \".symtab\" in sections:\n section = \".symtab\"\n if \".strtab\" in sections:\n st_offset = sections[\".strtab\"][\"offset\"]\n else:\n st_offset = sections[section][\"offset\"]\n \n elif \".dynsym\" in sections:\n section = \".dynsym\"\n if \".dynstr\" in sections:\n st_offset = sections[\".dynstr\"][\"offset\"]\n else:\n st_offset = sections[section][\"offset\"]\n \n \n if section not in sections:\n return {}, {} \n \n symbols = {}\n imports = {}\n offset = sections[section][\"offset\"]\n size = sections[section][\"size\"]\n index = offset\n while index < offset + size:\n vals = {}\n if len(data) < index+entry_len: \n break\n \n val_data = struct.unpack(symbol_entry_str, data[index:index+entry_len])\n for i, elem in enumerate(symbol_entry_spec):\n vals[elem[0]] = val_data[i]\n \n if st_offset is None:\n symbols[vals[\"name\"]] = vals\n else:\n func_name = get_name_from_string_table(data, st_offset, vals[\"name\"])\n if func_name:\n vals.pop(\"name\")\n vals[\"info\"] = get_symbol_info(vals[\"info\"])\n vals[\"shndx\"] = get_symbol_shndx(vals[\"shndx\"])\n \n if vals[\"info\"] == \"UNDEFINED\" and vals[\"value\"] == 0:\n tmp_name = func_name\n import_name = \"Unknown\"\n if \"@@\" in func_name:\n i = tmp_name.find(\"@@\")\n 
func_name = tmp_name[:i]\n import_name = tmp_name[i:].strip(\"@@\") \n if import_name not in imports:\n imports[import_name] = {}\n imports[import_name][func_name] = vals\n symbols[func_name] = vals\n \n index += entry_len \n \n return symbols, imports", "def __set_symbol_dict(self):\r\n return {0: list(alph) if self.is_case_snstv else list(alph)[:26],\r\n 1: list(dgt),\r\n 2: list(spcl) if self.is_spcl else []}", "def construct_table(self):\n table_str = self.header_row\n row_lbls, col_lbls = self.get_idxvals()\n for r,rlbl in enumerate(row_lbls):\n row_data = [self.data[rlbl,clbl] for clbl in col_lbls]\n table_str += self.construct_row(r, row_data)\n \n return table_str", "def generate_symbol_definitions(mode, symbols, prefix, definition):\n direct = []\n tabled = []\n for ii in symbols:\n direct += [ii.generate_rename_direct(prefix)]\n tabled += [ii.generate_rename_tabled(prefix)]\n if \"vanilla\" == mode:\n tabled = direct\n return template_symbol_definitions % (definition, \"\\n\".join(direct), \"\\n\".join(tabled))", "def tables():\n return {\n \"MAT24_STD_OCTAD\" : STD_OCTAD,\n }", "def save(self):\n # First, just allocate enough memory for the SDAT header.\n data = bytearray(0x40)\n\n # -------------------\n # Make the SYMB block\n\n symbolsStringTable = bytearray()\n def addSymbolAndGetOffset(symbol):\n if symbol is None:\n return -1\n offset = len(symbolsStringTable)\n symbolsStringTable.extend(symbol.encode('latin-1') + b'\\0')\n return offset\n\n symbolsHeaderOffsets = []\n\n # Parallel arrays, here.\n symbolsTableValues = []\n shouldIncrementByTableLen = []\n\n anySymbolsInWholeFile = False\n\n def addSymbolsFrom(namedList, nested=False):\n\n # First, figure out if any actual symbols exist\n anyActualSymbols = False\n anyActualSubsymbols = False\n if not nested:\n for symbol, _ in namedList:\n if symbol is not None:\n anyActualSymbols = True\n break\n else:\n for symbol, entry in namedList:\n if symbol is not None:\n anyActualSymbols = True\n break\n for subSymbol, subEntry in entry.sequences:\n if subSymbol is not None:\n anyActualSubsymbols = True\n break\n\n\n nonlocal anySymbolsInWholeFile\n anySymbolsInWholeFile |= anyActualSymbols\n anySymbolsInWholeFile |= anyActualSubsymbols\n\n # If there *are* any symbols, keep going\n symbolsHeaderOffsets.append(len(symbolsTableValues) * 4)\n\n if not nested:\n symbolsTableValues.append(len(namedList))\n shouldIncrementByTableLen.append(False)\n\n for symbol, _ in namedList:\n symbolsTableValues.append(addSymbolAndGetOffset(symbol))\n shouldIncrementByTableLen.append(True)\n\n else:\n mainList, subListsArea = [], []\n mainListSIBTL, subListsAreaSIBTL = [], []\n\n mainList.append(len(namedList))\n mainListSIBTL.append(False)\n\n mainListFullLength = (1 + 2 * len(namedList)) * 4\n subListsAreaOffset = (0x40\n + len(symbolsTableValues) * 4\n + mainListFullLength)\n\n for symbol, entry in namedList:\n\n mainList.append(addSymbolAndGetOffset(symbol))\n mainListSIBTL.append(True)\n\n subListOffset = subListsAreaOffset + len(subListsArea) * 4\n\n if entry is None:\n subNames = []\n else:\n subNames = [n for (n, s) in entry.sequences]\n\n if entry or subNames:\n subListsArea.append(len(subNames))\n subListsAreaSIBTL.append(False)\n\n for subSymbol in subNames:\n subListsArea.append(addSymbolAndGetOffset(subSymbol))\n subListsAreaSIBTL.append(True)\n\n mainList.append(subListOffset)\n mainListSIBTL.append(False)\n\n else:\n mainList.append(0)\n mainListSIBTL.append(False)\n\n symbolsTableValues.extend(mainList)\n 
symbolsTableValues.extend(subListsArea)\n shouldIncrementByTableLen.extend(mainListSIBTL)\n shouldIncrementByTableLen.extend(subListsAreaSIBTL)\n\n addSymbolsFrom(self.sequences)\n addSymbolsFrom(self.sequenceArchives, True)\n addSymbolsFrom(self.banks)\n addSymbolsFrom(self.waveArchives)\n addSymbolsFrom(self.sequencePlayers)\n addSymbolsFrom(self.groups)\n addSymbolsFrom(self.streamPlayers)\n addSymbolsFrom(self.streams)\n\n # Only add the SYMB block if there are any symbols\n if anySymbolsInWholeFile:\n symbolsBlockOffset = len(data)\n\n symbolsTableLen = len(symbolsTableValues) * 4\n symbolsTable = bytearray()\n for value, shouldIncrement in itertools.zip_longest(symbolsTableValues,\n shouldIncrementByTableLen):\n if value == -1:\n symbolsTable.extend(b'\\0\\0\\0\\0')\n else:\n if shouldIncrement:\n value += symbolsTableLen + 0x40\n symbolsTable.extend(struct.pack('<I', value))\n\n symbolsBlockSize = 0x40 + len(symbolsTable) + len(symbolsStringTable)\n paddedSymbSize = symbolsBlockSize\n while paddedSymbSize % 4:\n paddedSymbSize += 1\n if self.padSymbSizeTo4InSDATHeader:\n symbolsBlockSize = paddedSymbSize\n\n symbolsHeaderOffsetsTable = bytearray()\n for value in symbolsHeaderOffsets:\n if value is None:\n symbolsHeaderOffsetsTable.extend(b'\\0\\0\\0\\0')\n else:\n symbolsHeaderOffsetsTable.extend(struct.pack('<I', value + 0x40))\n\n symbolsHeader = struct.pack('<4sI',\n b'SYMB', paddedSymbSize)\n\n data.extend(symbolsHeader)\n data.extend(symbolsHeaderOffsetsTable)\n data.extend(b'\\0' * 0x18)\n data.extend(symbolsTable)\n data.extend(symbolsStringTable)\n\n else:\n symbolsBlockOffset = None\n symbolsBlockSize = None\n\n\n # -------------------\n # Make the INFO block\n while len(data) % 4: data.append(0)\n infoBlockOffset = len(data)\n\n # Add room to add the header later\n data.extend(b'\\0' * (8 + 8 * 4))\n\n # Pad to 0x20 relative to the INFO block, for some reason\n while (len(data) - infoBlockOffset) % 0x20: data.append(0)\n\n # Helper functions\n def info_declarePart(partNumber):\n struct.pack_into('<I', data, infoBlockOffset + 8 + 4 * partNumber,\n len(data) - infoBlockOffset)\n def addFileAndGetID(file, dataMergeOptimizationID):\n idx = _common.listFind(files, file)\n\n while idx != -1:\n if dataMergeOptimizationID == fileMergeIDs[idx]:\n return idx\n idx = _common.listFind(files, file, idx + 1)\n\n files.append(file)\n fileMergeIDs.append(dataMergeOptimizationID)\n return len(files) - 1\n\n # We encode sections out of order, so that the files will be in\n # the same order as in retail SDATs.\n fileMergeIDs = []\n files = []\n\n # Info part 0: SSEQ\n info_declarePart(0)\n\n data.extend(struct.pack('<I', len(self.sequences)))\n sseqOffsetsTableOffset = len(data)\n data.extend(b'\\0' * (4 * len(self.sequences)))\n\n for i, (_, sseq) in enumerate(self.sequences):\n if sseq is None:\n entryOff = 0\n else:\n entryOff = len(data) - infoBlockOffset\n\n (file, unk02, bankID, volume, channelPressure,\n polyphonicPressure, playerID) = sseq.save()\n fileID = addFileAndGetID(file, sseq.dataMergeOptimizationID)\n\n data.extend(struct.pack('<3H4Bxx',\n fileID, unk02, bankID, volume, channelPressure,\n polyphonicPressure, playerID))\n\n struct.pack_into('<I', data, sseqOffsetsTableOffset + 4 * i, entryOff)\n\n # Info part 1: SSAR\n info_declarePart(1)\n\n data.extend(struct.pack('<I', len(self.sequenceArchives)))\n ssarOffsetsTableOffset = len(data)\n data.extend(b'\\0' * (4 * len(self.sequenceArchives)))\n\n for i, (_, ssar) in enumerate(self.sequenceArchives):\n if ssar is None:\n 
entryOff = 0\n else:\n entryOff = len(data) - infoBlockOffset\n\n file, unk02, _ = ssar.save()\n fileID = addFileAndGetID(file, ssar.dataMergeOptimizationID)\n\n data.extend(struct.pack('<HH',\n fileID, unk02))\n\n struct.pack_into('<I', data, ssarOffsetsTableOffset + 4 * i, entryOff)\n\n # Info part 2: SBNK\n info_declarePart(2)\n\n data.extend(struct.pack('<I', len(self.banks)))\n sbnkOffsetsTableOffset = len(data)\n data.extend(b'\\0' * (4 * len(self.banks)))\n\n for i, (sbnkName, sbnk) in enumerate(self.banks):\n if sbnk is None:\n entryOff = 0\n else:\n entryOff = len(data) - infoBlockOffset\n\n file, unk02, waveArchives = sbnk.save()\n fileID = addFileAndGetID(file, sbnk.dataMergeOptimizationID)\n\n swarIDs = []\n for s in waveArchives:\n swarIDs.append(-1 if s is None else s)\n while len(swarIDs) < 4:\n swarIDs.append(-1)\n\n if len(swarIDs) > 4:\n raise ValueError(f'SBNK {i} (\"{sbnkName}\") uses '\n f'{len(swarIDs)} SWARs. The maximum is 4.')\n\n data.extend(struct.pack('<HH4h',\n fileID, unk02, *swarIDs))\n\n struct.pack_into('<I', data, sbnkOffsetsTableOffset + 4 * i, entryOff)\n\n\n # Info part 3: SWAR\n info_declarePart(3)\n\n data.extend(struct.pack('<I', len(self.waveArchives)))\n swarOffsetsTableOffset = len(data)\n data.extend(b'\\0' * (4 * len(self.waveArchives)))\n\n for i, (_, swar) in enumerate(self.waveArchives):\n if swar is None:\n entryOff = 0\n else:\n entryOff = len(data) - infoBlockOffset\n\n file, unk02 = swar.save()\n fileID = addFileAndGetID(file, swar.dataMergeOptimizationID)\n\n data.extend(struct.pack('<HH',\n fileID, unk02))\n\n struct.pack_into('<I', data, swarOffsetsTableOffset + 4 * i, entryOff)\n\n\n # Info part 4: Sequence players\n info_declarePart(4)\n\n data.extend(struct.pack('<I', len(self.sequencePlayers)))\n spOffsetsTableOffset = len(data)\n data.extend(b'\\0' * (4 * len(self.sequencePlayers)))\n\n for i, (_, sp) in enumerate(self.sequencePlayers):\n if sp is None:\n entryOff = 0\n else:\n entryOff = len(data) - infoBlockOffset\n\n maxSequences, channels, heapSize = sp.save()\n\n channelMask = 0\n for j in range(16):\n if j in channels:\n channelMask |= 1 << j\n\n data.extend(struct.pack('<HHI',\n maxSequences, channelMask, heapSize))\n\n struct.pack_into('<I', data, spOffsetsTableOffset + 4 * i, entryOff)\n\n\n # Info part 5: Groups\n info_declarePart(5)\n\n data.extend(struct.pack('<I', len(self.groups)))\n groupOffsetsTableOffset = len(data)\n data.extend(b'\\0' * (4 * len(self.groups)))\n\n for i, (_, group) in enumerate(self.groups):\n if group is None:\n entryOff = 0\n else:\n entryOff = len(data) - infoBlockOffset\n\n data.extend(struct.pack('<I', len(group)))\n\n for gEntry in group:\n data.extend(struct.pack('<BHxI', *gEntry.save()))\n\n struct.pack_into('<I', data, groupOffsetsTableOffset + 4 * i, entryOff)\n\n\n # Info part 6: Stream players\n info_declarePart(6)\n\n data.extend(struct.pack('<I', len(self.streamPlayers)))\n spOffsetsTableOffset = len(data)\n data.extend(b'\\0' * (4 * len(self.streamPlayers)))\n\n for i, (_, sp) in enumerate(self.streamPlayers):\n if sp is None:\n entryOff = 0\n else:\n entryOff = len(data) - infoBlockOffset\n\n channels = sp.save()\n chanCount = len(channels)\n while len(channels) < 16:\n channels.append(0xFF)\n\n data.append(chanCount)\n data.extend(channels)\n\n # This has to occur in order for the padding to work out\n # correctly. Weird, but, what can you do. 
Might even be\n # an unknown value.\n data.extend(b'\\0\\0\\0\\0')\n\n struct.pack_into('<I', data, spOffsetsTableOffset + 4 * i, entryOff)\n\n while len(data) % 4: data.append(0)\n\n\n # Info part 7: Streams\n info_declarePart(7)\n\n data.extend(struct.pack('<I', len(self.streams)))\n strmOffsetsTableOffset = len(data)\n data.extend(b'\\0' * (4 * len(self.streams)))\n\n for i, (_, strm) in enumerate(self.streams):\n if strm is None:\n entryOff = 0\n else:\n entryOff = len(data) - infoBlockOffset\n\n file, unk02, volume, priority, playerID, unk07 = strm.save()\n fileID = addFileAndGetID(file, strm.dataMergeOptimizationID)\n\n data.extend(struct.pack('<HH4B4x',\n fileID, unk02, volume, priority, playerID, unk07))\n\n struct.pack_into('<I', data, strmOffsetsTableOffset + 4 * i, entryOff)\n\n # Now we can finally fill the header in.\n struct.pack_into('<4sI', data, infoBlockOffset,\n b'INFO', len(data) - infoBlockOffset)\n\n infoBlockSize = len(data) - infoBlockOffset\n\n\n # ----------------------\n # Make a dummy FAT block, to be filled in when adding to the\n # FILE block\n\n while len(data) % 4: data.append(0)\n fatBlockOffset = len(data)\n fatBlockSize = 0xC + 0x10 * len(files)\n fatTableOffset = fatBlockOffset + 0xC\n\n fatHeader = struct.pack('<4sII',\n b'FAT ', 0xC + 0x10 * len(files), len(files))\n\n data.extend(fatHeader)\n data.extend(b'\\0' * (0x10 * len(files)))\n\n\n # -------------------\n # Make the FILE block and fill in the FAT block\n while len(data) % 4: data.append(0)\n fileBlockOffset = len(data)\n\n # Dummy header (to be filled in after we know the total size)\n data.extend(b'\\0' * 0xC)\n\n # Some games align the first file differently\n if self.firstFileAlignment is not None:\n while len(data) % self.firstFileAlignment:\n data.append(0)\n\n # Add each file\n for i, file in enumerate(files):\n\n # Files must be aligned to 0x20 relative to the SDAT\n # itself... usually. Some games align to other amounts.\n while len(data) % self.fileAlignment:\n data.append(0)\n\n # Actually add the file\n fileOffset = len(data)\n data.extend(file)\n \n # Add the appropriate FAT entry\n fLen = len(file)\n if self.fatLengthsIncludePadding:\n while fLen % self.fileAlignment: fLen += 1\n\n struct.pack_into('<II', data, fatTableOffset + 0x10 * i,\n fileOffset, fLen)\n\n # And one last pad for good measure. 
(And because retail files\n # do so.)\n if self.padAtEnd:\n while len(data) % self.fileAlignment:\n data.append(0)\n\n # Add the header\n struct.pack_into('<4sII', data, fileBlockOffset,\n b'FILE', len(data) - fileBlockOffset, len(files))\n\n fileBlockSize = len(data) - fileBlockOffset\n\n\n # -----------------------\n # Put the blocks together\n\n # Write the SDAT header\n struct.pack_into('<8I', data, 0x10,\n 0 if symbolsBlockOffset is None else symbolsBlockOffset,\n 0 if symbolsBlockSize is None else symbolsBlockSize,\n 0 if infoBlockOffset is None else infoBlockOffset,\n 0 if infoBlockSize is None else infoBlockSize,\n 0 if fatBlockOffset is None else fatBlockOffset,\n 0 if fatBlockSize is None else fatBlockSize,\n 0 if fileBlockOffset is None else fileBlockOffset,\n 0 if fileBlockSize is None else fileBlockSize)\n\n # Write the standard header to the beginning\n _common.NDS_STD_FILE_HEADER.pack_into(data, 0,\n b'SDAT', 0xFEFF, 0x100, len(data), 0x40,\n 3 if symbolsBlockOffset is None else 4)\n\n return data", "def create_symbol_to_possible_cell_mapping(self):\r\n symbols_to_cells = defaultdict(set)\r\n for cell in self.iterate_empty_cells():\r\n for symbol in cell.get_possible_symbols():\r\n symbols_to_cells[symbol].add(cell)\r\n return symbols_to_cells", "def _get_symbols(self):\n\n symbols = self.get_symbols()\n\n if isinstance(symbols, dict):\n keys = ['symbol', 'from_symbol', 'to_symbol']\n correct_keys = np.isin(keys, list(symbols.keys())).all()\n\n if not correct_keys:\n raise ImplementationError('''\n Dict should be in the form:\n {'symbol':[], 'from_symbol':[], 'to_symbol':[]}\n ''')\n else:\n symbols = pd.DataFrame(symbols, index = [symbols['symbol']])\n\n symbols.index = symbols.symbol\n\n return symbols", "def _format_symbol_table_content(self, title: str, symbols: Any) -> str:\n\n header = f\"\\t\\t:::: {title} ::::\"\n lines = [\"\\n\", header, \"__\" * len(header)]\n if type(symbols).__name__ == \"odict_items\":\n lines.extend((f\"| {key}: {value}\") for key, value in self._symbols.items())\n elif type(symbols).__name__ == \"odict_keys\":\n lines.extend((f\"| {key}\") for key in self._symbols.keys())\n\n lines.append(\"\\n\")\n formatted_content = \"\\n\".join(lines)\n return formatted_content", "def list_symbol_tables(mst):\n stlist = []\n def append_st(st):\n #print(st)\n stlist.append(st)\n for s in st.get_symbols():\n for ns in s.get_namespaces():\n append_st(ns)\n if not isinstance(mst, symtable.SymbolTable):\n # Assume it is text of a program to compile\n mst = symtable.symtable(mst, '<string>', 'exec')\n append_st(mst)\n return stlist", "def return_astropy_table(self):\n table = Table()\n for name in self.hdfile.root.Data.Fibers.colnames:\n if hasattr(self, name):\n table[name] = getattr(self, name)\n\n return table", "def return_astropy_table(self):\n table = Table()\n for name in self.hdfile.root.Data.Fibers.colnames:\n if hasattr(self, name):\n table[name] = getattr(self, name)\n\n return table", "def __repr__(self):\n return 'HashTable({!r})'.format(self.items())", "def execute(self, symbol_table, test_mode=False):", "def execute(self, symbol_table, test_mode=False):", "def _symbols_table_file_content(\n rule_set: _RewriteRuleSet) -> Generator[str, None, None]:\n\n def _line(symbol: str, index: int) -> str:\n return f\"{symbol}\\t{index}\\n\"\n\n fst_symbols = []\n\n for rule in rule_set.rule:\n fst_symbols.extend(_symbols_of_input(rule.input))\n fst_symbols.extend(_symbols_of_output(rule.output))\n\n unique_symbols = 
set(fst_symbols).difference({common.EPSILON})\n complex_symbols = [s for s in unique_symbols if len(s) > 1]\n\n index = 983040 # start of the Unicode private use area.\n\n for symbol in sorted(complex_symbols):\n yield _line(symbol, index)\n index += 1\n\n logging.info(\"generated complex symbols file content\")", "def __repr__(self):\n return dict_to_table(self)" ]
[ "0.7142161", "0.7140003", "0.7079655", "0.67236257", "0.66320723", "0.65642846", "0.65429175", "0.65292776", "0.64784485", "0.6340965", "0.6260409", "0.62448263", "0.62234396", "0.61231375", "0.60608643", "0.6057268", "0.5934887", "0.58612394", "0.5844101", "0.5835696", "0.5796547", "0.5714663", "0.56826395", "0.5674987", "0.5674987", "0.567161", "0.56626666", "0.56626666", "0.56476223", "0.56268704" ]
0.7709988
0
Given a question ID, returns a tuple containing a list of the reference answers, human answers (Answer objects), and the canonical answer id
def __getitem__(self, qid):
    ref = []
    if qid in self._reference:
        ref = self._reference[qid]
    hum = []
    if qid in self._human:
        hum = self._human[qid]
    aid = [-1, ""]
    if qid in self._id:
        aid = self._id[qid]
    else:
        logger.warning("Answer ID %s missing" % qid)
    return ref, hum, aid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_answer(self, answer_id):\n return self.answers[answer_id]", "def get_answers_by_answer_id(self, answer_id):\n return self._answers_by_id.get(answer_id)", "def get(self, question_id):\n response = Answers.get_all_answers(question_id)\n\n return response", "def get_question_answers(self):\r\n # dict of (id, correct_answer)\r\n answer_map = dict()\r\n for response in self.responders.keys():\r\n results = self.responder_answers[response]\r\n answer_map.update(results)\r\n\r\n # include solutions from <solution>...</solution> stanzas\r\n for entry in self.tree.xpath(\"//\" + \"|//\".join(solution_tags)):\r\n answer = etree.tostring(entry)\r\n if answer:\r\n answer_map[entry.get('id')] = contextualize_text(answer, self.context)\r\n\r\n log.debug('answer_map = %s', answer_map)\r\n return answer_map", "def find_answers_to_a_question(list_name, question_id):\n\n my_items = [element for element in list_name if element[\n 'question_id'] == question_id]\n\n if my_items:\n return my_items\n return False", "def get_answer_ids(self):\r\n answer_ids = []\r\n for response in self.responders.keys():\r\n results = self.responder_answers[response]\r\n answer_ids.append(results.keys())\r\n return answer_ids", "def get_answers(question_id, api_site_parameter, rpc = None, page = 1, body = False, comments = False, pagesize = 100, sort = 'votes'):\n path = \"questions/%d/answers\" % question_id\n \n query_filter = '.p-I38n'\n \n if body:\n query_filter = '-m8C*uMP-q0'\n if comments:\n query_filter = ')(Ybp0wdAN'\n if body and comments:\n query_filter = 'D9l0ZsiD'\n if pagesize == 0:\n query_filter = '!-q2Rj6nE'\n \n results = __fetch_results(path, api_site_parameter, rpc = rpc, page = page, filter = query_filter, pagesize = pagesize, sort = sort)\n return results", "def __getitem__(self, answer_id):\n return self._answer_dependencies[answer_id]", "def get_question_answer_set(self, id2line, conversations):\n questions = []\n answers = []\n\n # This uses a simple method in an attempt to gather question/answers\n for conversation in conversations:\n if len(conversation) % 2 != 0:\n conversation = conversation[:-1] # remove last item\n\n for idx, line_id in enumerate(conversation):\n if idx % 2 == 0:\n questions.append(id2line[line_id])\n else:\n answers.append(id2line[line_id])\n\n return questions, answers", "def get_question_of_answer(answer):\n dynamodb = boto3.resource(\"dynamodb\", region_name=\"eu-central-1\")\n question_table = dynamodb.Table(\"Questions\")\n\n question_id = answer.get(\"QuestionId\")\n # query topic_id of the question\n try:\n response = question_table.get_item(Key={\"QuestionId\": question_id})\n question = response[\"Item\"]\n except:\n print(\"No question found, returning None..\")\n return None\n return question", "def process(data_item, article_id):\n questions = []\n answers = []\n paragraph = [article_id, data_item['context']]\n\n for item in data_item['qas']:\n question = [item[\"id\"], item[\"question\"], item['is_impossible']]\n questions.append(question)\n if item['is_impossible']:\n continue\n answer_options = item[\"answers\"]\n answer_set = set()\n for option in answer_options:\n answer_tuple = (option['text'], option['answer_start'])\n answer_set.add(answer_tuple)\n for index, answer_tuple in enumerate(answer_set):\n answer = [\"{}_{}\".format(item[\"id\"], index+1), item[\"id\"], answer_tuple[0], answer_tuple[1]]\n answers.append(answer)\n return paragraph, questions, answers", "def get_matching_answers(self, answer):\r\n return [key for key in self.hints if 
self.compare_answer(key, answer)]", "def answers(self):\n from quiz.models import Answer\n qids = self.values_list('id', flat=True)\n return Answer.objects.filter(\n question__id__in=qids).select_related('question')", "def check_that_suggested_answers_work(problem):\r\n # These are actual answers we get from the responsetypes\r\n real_answers = problem.get_question_answers()\r\n\r\n # all_answers is real_answers + blanks for other answer_ids for which the\r\n # responsetypes can't provide us pre-canned answers (customresponse)\r\n all_answer_ids = problem.get_answer_ids()\r\n all_answers = dict((answer_id, real_answers.get(answer_id, \"\"))\r\n for answer_id in all_answer_ids)\r\n\r\n log.debug(\"Real answers: {0}\".format(real_answers))\r\n if real_answers:\r\n try:\r\n real_results = dict((answer_id, result) for answer_id, result\r\n in problem.grade_answers(all_answers).items()\r\n if answer_id in real_answers)\r\n log.debug(real_results)\r\n assert(all(result == 'correct'\r\n for answer_id, result in real_results.items()))\r\n except UndefinedVariable as uv_exc:\r\n log.error(\"The variable \\\"{0}\\\" specified in the \".format(uv_exc) +\r\n \"solution isn't recognized (is it a units measure?).\")\r\n except AssertionError:\r\n log.error(\"The following generated answers were not accepted for {0}:\"\r\n .format(problem))\r\n for question_id, result in sorted(real_results.items()):\r\n if result != 'correct':\r\n log.error(\" {0} = {1}\".format(question_id, real_answers[question_id]))\r\n except Exception as ex:\r\n log.error(\"Uncaught error in {0}\".format(problem))\r\n log.exception(ex)", "def get_answer(answer_id, api_site_parameter, body = False, comments = False, pagesize = 1):\n path = \"answers/%d\" % answer_id\n \n query_filter = ')(Y_v2R5Tz'\n \n if body:\n query_filter = '-m84pZ4-YWK'\n if comments:\n query_filter = ')(Ybxr-pC9'\n if body and comments:\n query_filter = 'D9kY06hX'\n \n results = __fetch_results(path, api_site_parameter, filter = query_filter, pagesize = pagesize)\n return results", "def get_answer_texts(\n self, answer_token_idxs: Dict[QuestionId, Tuple[Any, ...]]\n ) -> Dict[QuestionId, str]:\n return self.corpus.get_answer_texts(answer_token_idxs)", "def get_answers(self):\r\n if len(self.answer_ids) > 1:\r\n return self.default_answer_map\r\n if self.expect:\r\n return {self.answer_ids[0]: self.expect}\r\n return self.default_answer_map", "def get_submission_metadata(self, answers, correct_map):\r\n\r\n input_metadata = {}\r\n for input_id, internal_answer in answers.iteritems():\r\n answer_input = self.lcp.inputs.get(input_id)\r\n\r\n if answer_input is None:\r\n log.warning('Input id %s is not mapped to an input type.', input_id)\r\n\r\n answer_response = None\r\n for response, responder in self.lcp.responders.iteritems():\r\n for other_input_id in self.lcp.responder_answers[response]:\r\n if other_input_id == input_id:\r\n answer_response = responder\r\n\r\n if answer_response is None:\r\n log.warning('Answer responder could not be found for input_id %s.', input_id)\r\n\r\n user_visible_answer = internal_answer\r\n if hasattr(answer_input, 'get_user_visible_answer'):\r\n user_visible_answer = answer_input.get_user_visible_answer(internal_answer)\r\n\r\n # If this problem has rerandomize enabled, then it will generate N variants of the\r\n # question, one per unique seed value. In this case we would like to know which\r\n # variant was selected. 
Ideally it would be nice to have the exact question that\r\n # was presented to the user, with values interpolated etc, but that can be done\r\n # later if necessary.\r\n variant = ''\r\n if self.rerandomize != 'never':\r\n variant = self.seed\r\n\r\n is_correct = correct_map.is_correct(input_id)\r\n if is_correct is None:\r\n is_correct = ''\r\n\r\n input_metadata[input_id] = {\r\n 'question': getattr(answer_input, 'loaded_attributes', {}).get('label', ''),\r\n 'answer': user_visible_answer,\r\n 'response_type': getattr(getattr(answer_response, 'xml', None), 'tag', ''),\r\n 'input_type': getattr(answer_input, 'tag', ''),\r\n 'correct': is_correct,\r\n 'variant': variant,\r\n }\r\n\r\n return input_metadata", "def get_answer(self, context: str, question: Union[str, List[str]]) -> None:\n raise NotImplementedError()", "def get_answers(self):\r\n anshtml = '<span class=\"openended-answer\"><pre><code>{0}</code></pre></span>'.format(self.answer)\r\n return {self.answer_id: anshtml}", "def get_answer_to_question(assignment, question_name):\n if not assignment:\n return None\n\n if not assignment.answers:\n return None\n\n for each in assignment.answers[0]:\n if each.qid == question_name:\n return each.fields[0] if each.fields else None\n\n return None", "def get_answer(self, question: Union[str, List[str]]) -> None:\n raise NotImplementedError()", "def correct_answer_for_all(context, question):\n answers = question.get_answers()\n incorrect_list = context.get('incorrect_questions', [])\n if question.id in incorrect_list:\n user_was_incorrect = True\n else:\n user_was_incorrect = False\n\n return {'previous': {'answers': answers},\n 'user_was_incorrect': user_was_incorrect}", "def process_answer(ans):\n\n #TODO: check whether need type coversion?\n ans['parentid'] = int(ans['parentid'])\n ## I remain comments here, maybe can do some sentiment analysis to evaluate score of answer\n return ans", "def answers():\n answer = ([[1, 2, 2, 1], # 3. Not necessarily legal if terms of service disallow\n ['https://soundcloud.com/', # 1. soundcloud, some disallow\n 'https://cfmriweb.ucsd.edu/', # 1. wiki, some disallow\n 'https://www.thesaurus.com/', # 1. thesaurus, some disallow\n 'https://ucsd.sona-systems.com/', # 2. SONA, disallow completely\n 'https://www.linkedin.com/', # 2. LinkedIn, disallow completely\n 'https://facebook.com/']]) # 2. 
Facebook, disallow completely\n return answer", "def get_questions(self, question_id):\n return self._questions_by_id.get(question_id)", "def __getEgoAndAlterQuestionIds2(self):\n egoQuestionIds = [(\"Q4\",0), (\"Q5X\",0), (\"Q48\",0)]\n alterQuestionIds = [(\"Q184$\",0), (\"Q185$X\",0), (\"Q186$\",0)]\n\n egoQuestionIds.extend([(\"Q51X\",0)])\n alterQuestionIds.extend([(\"Q191$X\",0)])\n\n for i in range(1, self.numProfessions+1):\n egoQuestionIds.append((\"Q7_\" + str(i), 1))\n alterQuestionIds.append((\"Q187$_\" + str(i) , 1))\n\n egoQuestionIds.extend([(\"Q44AX\", 0), (\"Q44BX\", 0), (\"Q44CX\",0), (\"Q44DX\",0)])\n alterQuestionIds.extend([(\"Q180A$X\", 0), (\"Q180B$X\", 0), (\"Q180C$X\",0), (\"Q180D$X\",0)])\n\n egoQuestionIds.extend([(\"Q46A\", 0), (\"Q46B\", 0), (\"Q46C\", 0), (\"Q46D\", 0)])\n alterQuestionIds.extend([(\"Q182A$\", 0), (\"Q182B$\", 0), (\"Q182C$\", 0), (\"Q182D$\", 0)])\n\n return (egoQuestionIds, alterQuestionIds)", "def get_answers(self):\r\n return self.answer_values", "def convert_answers_to_payload_0_0_1(\n *,\n metadata: MetadataProxy,\n response_metadata: MetadataType,\n answer_store: AnswerStore,\n list_store: ListStore,\n schema: QuestionnaireSchema,\n full_routing_path: Iterable[RoutingPath],\n progress_store: ProgressStore,\n supplementary_data_store: SupplementaryDataStore,\n) -> OrderedDict[str, Any]:\n data = OrderedDict()\n for routing_path in full_routing_path:\n for block_id in routing_path:\n answer_ids = schema.get_answer_ids_for_block(block_id)\n answers_in_block = answer_store.get_answers_by_answer_id(\n answer_ids, routing_path.list_item_id\n )\n\n for answer_in_block in answers_in_block:\n answer_schema = None\n\n block: ImmutableDict = schema.get_block_for_answer_id(answer_in_block.answer_id) # type: ignore\n current_location = Location(\n block_id=block_id,\n section_id=routing_path.section_id,\n list_item_id=routing_path.list_item_id,\n )\n question = choose_question_to_display(\n block,\n schema,\n metadata,\n response_metadata,\n answer_store,\n list_store,\n current_location=current_location,\n progress_store=progress_store,\n supplementary_data_store=supplementary_data_store,\n )\n for answer_id, answer in schema.get_answers_for_question_by_id(\n question\n ).items():\n if answer_id == answer_in_block.answer_id:\n answer_schema = answer\n break\n\n value = answer_in_block.value\n\n if answer_schema is not None and value is not None:\n if answer_schema[\"type\"] == \"Checkbox\":\n data.update(\n _get_checkbox_answer_data(\n answer_store, answer_schema, value # type: ignore\n )\n )\n elif \"q_code\" in answer_schema:\n answer_data = _encode_value(value)\n if answer_data is not None:\n data[answer_schema[\"q_code\"]] = _format_downstream_answer(\n answer_schema[\"type\"],\n answer_in_block.value,\n answer_data,\n )\n\n return data", "def get_answers(self):\r\n try:\r\n rxml = self.do_external_request('get_answers', {})\r\n exans = json.loads(rxml.find('expected').text)\r\n except Exception as err: # pylint: disable=W0703\r\n log.error('Error %s', err)\r\n if self.capa_system.DEBUG:\r\n msg = '<span class=\"inline-error\">%s</span>' % str(\r\n err).replace('<', '&lt;')\r\n exans = [''] * len(self.answer_ids)\r\n exans[0] = msg\r\n\r\n if not (len(exans) == len(self.answer_ids)):\r\n log.error('Expected %s answers from external server, only got %s!',\r\n len(self.answer_ids), len(exans))\r\n raise Exception('Short response from external server')\r\n return dict(zip(self.answer_ids, exans))" ]
[ "0.63491833", "0.6239461", "0.6170823", "0.59454125", "0.5801377", "0.57446605", "0.5685475", "0.5676668", "0.56612366", "0.565792", "0.56460077", "0.5644257", "0.5625713", "0.5581985", "0.5564018", "0.55500376", "0.55474454", "0.5515793", "0.5485804", "0.5477037", "0.54237163", "0.5400925", "0.53985196", "0.5344125", "0.5337923", "0.53222305", "0.5307771", "0.5291346", "0.52819765", "0.52540225" ]
0.67215127
0
Given a question, find where "ftp" occurs. Assumes features have been preprocessed.
def find_ftp(features):
    ftp_pos = -1
    for ii in xrange(len(features)):
        index, word = features[ii]
        if word == 'ftp':
            ftp_pos = index
    return ftp_pos
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_question(message, unique_users, q):\n \n line = get_tagged_user(message['text'], unique_users)[1]\n\n if '?' in line:\n return line\n\n START_WORDS = ['can', 'do', 'will', 'how', 'when', 'what', 'where',\n 'why', 'is', 'does', \"doesn't\", 'if', 'for', 'did', 'is']\n\n for word in START_WORDS:\n if line.lower().startswith(word):\n return line\n\n if fuzz.ratio(line, q) > 20:\n return line\n\n return None", "def locate(tgt_fpath, survey):\n flen = os.stat(tgt_fpath).st_size\n fpaths = survey.get(flen, ())\n if not fpaths:\n return None\n\n for fbase_path in fpaths:\n # print(' '*5, tgt_fpath, fbase_path)\n if not filecmp.cmp(tgt_fpath, fbase_path, shallow=True):\n continue # early reject, try other candidates\n if filecmp.cmp(tgt_fpath, fbase_path, shallow=False):\n # identically equal\n return fbase_path\n\n return None", "def _match(cls, url, **kwargs):\n return url.scheme.startswith('ftp')", "def _get_faction(text):\n for faction in _FACTIONS:\n if faction in text:\n return faction\n return None", "def features(self, sent, position):\n if type(sent[0]) is str:\n fts = []\n if self.training:\n curr_word = 'curr=' + sent[position].lower()\n fts.append(curr_word)\n elif sent[position].lower() in self.vocab:\n curr_word = 'curr=' + sent[position].lower()\n fts.append(curr_word)\n else:\n curr_word = 'curr=UNK'\n fts.append(curr_word)\n prefix = 'pref=' + sent[position][:2].lower()\n suffix = 'suff=' + sent[position][-2:].lower()\n if position == 0:\n prev_word1 = 'prev_word1=*START*'\n fts.append(prev_word1)\n if position == len(sent) - 1:\n next_word1 = 'next_word1=*END*'\n fts.append(next_word1)\n if position >= 1:\n if self.training:\n prev_word1 = 'prev_word1=' + sent[position - 1].lower()\n fts.append(prev_word1)\n elif 'prev_word1=' + sent[position - 1].lower() in self.vocab:\n prev_word1 = 'prev_word1=' + sent[position - 1].lower()\n fts.append(prev_word1)\n else:\n prev_word1 = 'prev_word1=UNK'\n fts.append(prev_word1)\n\n if position >= 2:\n if self.training:\n prev_word2 = 'prev_word2=' + sent[position - 2].lower()\n fts.append(prev_word2)\n elif 'prev_word2=' + sent[position - 2].lower() in self.vocab:\n prev_word2 = 'prev_word2=' + sent[position - 2].lower()\n fts.append(prev_word2)\n else:\n prev_word2 = 'prev_word2=UNK'\n fts.append(prev_word2)\n\n if position <= (len(sent) - 2):\n if self.training:\n next_word1 = 'next_word1=' + sent[position + 1].lower()\n fts.append(next_word1)\n elif 'next_word1=' + sent[position + 1].lower() in self.vocab:\n next_word1 = 'next_word1=' + sent[position + 1].lower()\n fts.append(next_word1)\n else:\n next_word1 = 'next_word1=UNK'\n fts.append(next_word1)\n if position <= (len(sent) - 3):\n if self.training:\n next_word2 = 'next_word2=' + sent[position + 2].lower()\n fts.append(next_word2)\n elif 'next_word2=' + sent[position + 2].lower() in self.vocab:\n next_word2 = 'next_word2=' + sent[position + 2].lower()\n fts.append(next_word2)\n else:\n next_word2 = 'next_word2=UNK'\n fts.append(next_word2)\n\n if self.training:\n fts.append(prefix)\n elif prefix in self.vocab:\n fts.append(prefix)\n if self.training:\n fts.append(suffix)\n elif suffix in self.vocab:\n fts.append(suffix)\n\n else:\n fts = []\n if self.training:\n curr_word = 'curr=' + sent[position][0].lower()\n fts.append(curr_word)\n elif sent[position][0].lower() in self.vocab:\n curr_word = 'curr=' + sent[position][0].lower()\n fts.append(curr_word)\n else:\n curr_word = 'curr=UNK'\n fts.append(curr_word)\n prefix = 'pref=' + sent[position][0][:2].lower()\n suffix = 
'suff=' + sent[position][0][-2:].lower()\n if position == 0:\n prev_word1 = 'prev_word1=*START*'\n fts.append(prev_word1)\n if position == len(sent) - 1:\n next_word1 = 'next_word1=*END*'\n fts.append(next_word1)\n if position >= 1:\n if self.training:\n prev_word1 = 'prev_word1=' + sent[position-1][0].lower()\n fts.append(prev_word1)\n elif 'prev_word1=' + sent[position-1][0].lower() in self.vocab:\n prev_word1 = 'prev_word1=' + sent[position-1][0].lower()\n fts.append(prev_word1)\n else:\n prev_word1 = 'prev_word1=UNK'\n fts.append(prev_word1)\n\n if position >= 2:\n if self.training:\n prev_word2 = 'prev_word2=' + sent[position-2][0].lower()\n fts.append(prev_word2)\n elif 'prev_word2=' + sent[position-2][0].lower() in self.vocab:\n prev_word2 = 'prev_word2=' + sent[position-2][0].lower()\n fts.append(prev_word2)\n else:\n prev_word2 = 'prev_word2=UNK'\n fts.append(prev_word2)\n\n if position <= (len(sent) - 2):\n if self.training:\n next_word1 = 'next_word1=' + sent[position+1][0].lower()\n fts.append(next_word1)\n elif 'next_word1=' + sent[position+1][0].lower() in self.vocab:\n next_word1 = 'next_word1=' + sent[position+1][0].lower()\n fts.append(next_word1)\n else:\n next_word1 = 'next_word1=UNK'\n fts.append(next_word1)\n if position <= (len(sent) - 3):\n if self.training:\n next_word2 = 'next_word2=' + sent[position+2][0].lower()\n fts.append(next_word2)\n elif 'next_word2=' + sent[position+2][0].lower() in self.vocab:\n next_word2 = 'next_word2=' + sent[position + 2][0].lower()\n fts.append(next_word2)\n else:\n next_word2 = 'next_word2=UNK'\n fts.append(next_word2)\n\n if self.training:\n fts.append(prefix)\n elif prefix in self.vocab:\n fts.append(prefix)\n if self.training:\n fts.append(suffix)\n elif suffix in self.vocab:\n fts.append(suffix)\n\n return fts", "def get_matching_answers(self, answer):\r\n return [key for key in self.hints if self.compare_answer(key, answer)]", "def find_corresponding_question(chat, tagged_user, prev_mod_index, index, q, unique_users):\n\n # searches from the previous message sent by the moderator\n for i in range(index, prev_mod_index[1], -1):\n message = chat[i]\n if message['user'] == tagged_user:\n question = is_question(message, unique_users, q)\n if question != None:\n return question\n \n # expanding the search space ft. 
some additional checks\n for i in range(prev_mod_index[1], prev_mod_index[0], -1):\n message = chat[i]\n \n if message['user'] == tagged_user:\n question = find_question_expanded(message, unique_users, q)\n \n if question != None:\n return question\n\n return None", "def _get_fact(self, fact):\n for kbfact in self.facts:\n if fact == kbfact:\n return kbfact", "def _get_fact(self, fact):\n for kbfact in self.facts:\n if fact == kbfact:\n return kbfact", "def _get_fact(self, fact):\n for kbfact in self.facts:\n if fact == kbfact:\n return kbfact", "def searchTFC(self, lfn):\n if self.tfc == None:\n msg = \"Trivial File Catalog not available to match LFN:\\n\"\n msg += lfn\n print(msg)\n return None\n if self.tfc.preferredProtocol == None:\n msg = \"Trivial File Catalog does not have a preferred protocol\\n\"\n msg += \"which prevents local stage out for:\\n\"\n msg += lfn\n print(msg)\n return None\n\n pfn = self.tfc.matchLFN(self.tfc.preferredProtocol, lfn)\n if pfn == None:\n msg = \"Unable to map LFN to PFN:\\n\"\n msg += \"LFN: %s\\n\" % lfn\n return None\n\n msg = \"LFN to PFN match made:\\n\"\n msg += \"LFN: %s\\nPFN: %s\\n\" % (lfn, pfn)\n print(msg)\n return pfn", "def find_feat_part(artist, albumartist):\n # Look for the album artist in the artist field. If it's not\n # present, give up.\n albumartist_split = artist.split(albumartist, 1)\n if len(albumartist_split) <= 1:\n return None\n\n # If the last element of the split (the right-hand side of the\n # album artist) is nonempty, then it probably contains the\n # featured artist.\n elif albumartist_split[1] != '':\n # Extract the featured artist from the right-hand side.\n _, feat_part = split_on_feat(albumartist_split[1])\n return feat_part\n\n # Otherwise, if there's nothing on the right-hand side, look for a\n # featuring artist on the left-hand side.\n else:\n lhs, rhs = split_on_feat(albumartist_split[0])\n if lhs:\n return lhs\n\n return None", "def getFact(self, fact):\n for kbfact in self.kb.facts:\n if fact == kbfact:\n return True\n return False", "def ask(self, question):\n\n\t\t# If you're just trying to test voice detection, you can uncomment\n\t\t# the following 5 lines. 
Bobby will guess \"yellow flashlight\" and will prompt\n\t\t# you to correct him by saying \"blue flashlight\"\n\n\t\t# fake_answers = [\"no\", \"yes\", \"yes\", \"yes\", \"no\", \"yes\", \"yes\"]\n\t\t# global count\n\t\t# count += 1\n\t\t# print question\n\t\t# return fake_answers[count - 1]\n\n\t\t# self.say(question)\n\t\t# #starts listening for an answer\n\t\t# self.asr.subscribe(\"TEST_ASR\")\n\t\t# data = (None, 0)\n\t\t# while not data[0]:\n\t\t# \tdata = self.mem.getData(\"WordRecognized\")\n\t\t# #stops listening after he hears yes or no\n\t\t# self.asr.unsubscribe(\"TEST_ASR\")\n\t\t#\n\t\t# print data\n\t\t#\n\t\t# for word in self.yes_no_vocab:\n\t\t# \tfor syn in self.yes_no_vocab[word]:\n\t\t# \t\tif data[0] == syn:\n\t\t# \t\t\treturn word", "def __check_feature(f,stopwords):\n if f == \"\" or f == None:\n return None\n if f == \"RT\":\n return False\n if f == \"via\":\n return False\n if len(re.findall(r\"(\\w)\", f)) < 1:\n return False\n if f == \"&amp\":\n return False\n if f in stopwords:\n return False\n if len(f) < 2:\n return False\n else:\n return True", "def getftpfilename(ftpadress, remotedir, pattern):\n # ftp = FTP('ftp.cdc.noaa.gov')\n ftp = FTP(ftpadress)\n # print(ftp.getwelcome())\n # get direction info\n try:\n ftp.login()\n # ftp.cwd('/Datasets/ncep.reanalysis/pressure')\n ftp.cwd(remotedir)\n files = []\n ftp.dir(files.append) \n # # print(files)\n except ftplib.all_errors as e:\n print('FTP error:', e)\n # decode filename or dirname\n re_files = []\n for file in files:\n # print(file)\n if file.find(pattern) > 0:\n ss = file.split(' ')\n re_files.append(ss[-1]) \n return re_files", "def kb_ask(self, fact):\n print(\"Asking {!r}\".format(fact))\n if factq(fact):\n f = Fact(fact.statement)\n bindings_lst = ListOfBindings()\n # ask matched facts\n for fact in self.facts:\n binding = match(f.statement, fact.statement)\n if binding:\n bindings_lst.add_bindings(binding, [fact])\n\n return bindings_lst if bindings_lst.list_of_bindings else []\n\n else:\n print(\"Invalid ask:\", fact.statement)\n return []\n\n # Make sure to write edge case that if the retracted fact \n # is supported by something - exit?", "def TP_FN_words(test, result):\n tp = 0\n fn = 0\n for i in range(len(test)):\n if(test[i][0] != '_' and test[i][0] == result[i]):\n tp += 1\n elif(test[i][0] != '_' and test[i][0] != result[i]):\n fn += 1\n return tp, fn", "def findFeatures(self):\n\t\tpass", "def _extract_feats(self, a_tweet):\n raise NotImplementedError", "def get_result_ft(wd):\n try:\n result = wd.find_element_by_id(\"js-partial\").text\n ft = clean_goals(result)\n ft = ft.split(\",\")\n return ft[1]\n except:\n return \"N/A FT Result\"", "def pull_suggestion(self, callback, who, arg):\n\t\t\n random_sug = self.dong.db.get_random_row('suggest')\n res = self.google_suggest(callback, who, random_sug[2], False)\n\t\t\n w = res.split()\n if w[0].lower() in ('what', 'why', 'was', 'where', 'who', 'which', 'whom', 'when', 'how', 'is', 'are', 'did'):\n if w[-1:] != '?':\n res = res + '?'\n return res.capitalize()", "def get_match_questions(to_find):\n res_dict, tokens, q_scores = from_code_to_question(to_find)\n res = []\n for key, value in res_dict.items():\n # or use any other \"smarter\" way\n if float(value) > float(4)/5 * tokens:\n res += [key]\n return res", "def get_challenge_suggestions(self, cr, uid, context=None):\n plan_info = []\n goal_plan_obj = self.pool.get('gamification.goal.plan')\n plan_ids = goal_plan_obj.search(cr, uid, [('proposed_user_ids', 'in', uid), ('state', '=', 
'inprogress')], context=context)\n for plan in goal_plan_obj.browse(cr, uid, plan_ids, context=context):\n values = {\n 'id': plan.id,\n 'name': plan.name,\n 'description': plan.description,\n }\n plan_info.append(values)\n return plan_info", "def extract_MSQs(line):\n \n tokens = sent_tokenize(line)\n questions = []\n \n for i in range(len(tokens)):\n if tokens[i].endswith('?'):\n q1 = tokens[i] # find first question\n sep = 0 # counter for separation between end of q1 and start q2\n for j in range(i+1, len(tokens)): # look for paired question\n if tokens[j].endswith('?'):\n q2 = tokens[j]\n questions.append((q1, q2, sep))\n break\n else:\n sep += len(tokens[j])\n\n return questions", "def fqdns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"fqdns\")", "def fqdns(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"fqdns\")", "def question_new_search():", "def question(phrase):\n\n if phrase[-1] == '?':\n return 'Sure.'", "def get_language_features(self, queries, gram):\n queries_dim = queries.dim()\n\n if queries_dim==3:\n N = queries.size(0)\n M = queries.size(1)\n num_words = self.num_words[gram]\n queries = queries.view(-1, num_words) # resize (N,M,k) -> (N*M,k)\n\n language_feats = self.language_nets[self.gram_id[gram]](queries)\n\n if queries_dim==3:\n language_feats = language_feats.view(N, M, -1)\n\n return language_feats" ]
[ "0.5377158", "0.5220282", "0.5132496", "0.50776434", "0.49986395", "0.4923811", "0.48578218", "0.4798723", "0.4798723", "0.4798723", "0.475099", "0.46580526", "0.46575606", "0.46463495", "0.46400103", "0.46125606", "0.46123046", "0.46002674", "0.45936626", "0.45734656", "0.4559281", "0.45525655", "0.45519823", "0.45479226", "0.4547633", "0.45454192", "0.45454192", "0.45327592", "0.452723", "0.45193177" ]
0.7341822
0
String. WWID of current mpath.
def wwid(self):
    return self._uuid
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unique_id(self) -> str:\n return '_'.join(['wavin', self._controller_id, str(self._name), 'battery'])", "def getPath(self):\n uid = str(self._result.uid)\n if not uid.startswith('/zport/dmd'):\n uid = '/zport/dmd/' + uid\n return uid", "def device_path(self):\n return self._engine.device_path()", "def getCmsswBase(self):\n return self[\"CMSSW_BASE\"]", "def get_wd(self):\n raise NotImplementedError", "def windows_name(self):\n return self._windows_name", "def workspace_id(self) -> str:\n return pulumi.get(self, \"workspace_id\")", "def workspace_id(self) -> str:\n return pulumi.get(self, \"workspace_id\")", "def iwpath(self):\n return self.server + self.articlepath", "def hardware_id(self):\n return uuid.uuid4()", "def wwn(self) -> SmartSsdWwn:\n return self._wwn", "def workspace_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workspace_id\")", "def workspace_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"workspace_id\")", "def get_w(self):\n return self.w", "def directory_id(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"directory_id\")", "def get_name(self) -> str:\n return os.path.split(os.getcwd())[-1]", "def whisper_filename(self):\r\n source_name = self.source_id and self.source.name or ''\r\n return get_valid_filename(\"{0}__{1}.wsp\".format(source_name,\r\n self.name))", "def getWaveformFileName(self):\n return self.waveform_info.split(\":\")[1][:20]", "def wm_raw(self):\n return self.get_par(\"raw_drive\")", "def device_path(self):\n return self._device_path", "def get_char_name(self):\n return self._character_device_path.split('/')[-1]", "def winDir(self, json):\n windDir = str(json['forecast']['simpleforecast']['forecastday'][0]['avewind']['dir'])\n return windDir", "def unique_id(self) -> str:\n return f\"{self._device.unique_id}_battery\"", "def get_char_device_path(self):\n return self._character_device_path", "def GetWPADriver(self):\n return str(self.wifi.wpa_driver)", "def get_device_id(self) -> str:\n return Config.get('device_id')", "def id(self):\n if settings.env_root:\n retpath = self.filename[len(settings.cases_dir):]\\\n .lstrip(os.path.sep)\n base = os.path.splitext(retpath)[0]\n else:\n base = os.path.splitext(os.path.basename(self.filename))[0]\n return base.replace(os.path.sep, '.')", "def getPath(self):\n path = '/'.join(self.getPhysicalPath())\n return path", "def get_device(self) -> str:\n pass", "def unique_id(self) -> str:\n return self.tahoma_device.url" ]
[ "0.6177691", "0.60953784", "0.60286385", "0.59536564", "0.59037435", "0.58918136", "0.58785886", "0.58785886", "0.58731186", "0.5863684", "0.58540994", "0.5815289", "0.5815289", "0.5808639", "0.57726943", "0.57599044", "0.5754434", "0.5745627", "0.57430816", "0.57271993", "0.5700486", "0.56858057", "0.56857485", "0.56746054", "0.56727904", "0.5664702", "0.56622255", "0.5657409", "0.5652112", "0.5628372" ]
0.73362756
0
List of DMMP_path objects
def paths(self):
    rc = []
    for pg in self.path_groups:
        rc.extend(pg.paths)
    return rc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def path(self) -> List[Path]:\n return self._path", "def listPaths():\n try:\n paths = [x[1] for x in parseFstab(FSTAB)]\n return paths\n except DMException:\n return []", "def _get_path_objs(self, path_list):\n objs = []\n for path in path_list:\n obj = self.unrestrictedTraverse(path, None)\n if obj and getattr(obj, 'isPrincipiaFolderish', 0):\n objs.append(obj)\n \n return objs", "def get_paths(self):\n return self.path.split(',')", "def paths(self):\r\n return self._paths", "def get_paths(self):\n return self.paths", "def list_paths():\n paths = db.session.query(Path).all()\n data = []\n for path in paths:\n data.append({\"id\" : path.id,\n \"title\":path.title,\"rating\":path.rating,\n \"description\":path.description,\"date\":path.date,\n \"start_coordinate\":path.start_coordinate,\n \"end_coordinate\":path.end_coordinate})\n return jsonify(data=data, status=OK_STATUS)", "def paths(self):\n return tuple(self._path)", "def paths(self):\n return self._paths", "def paths(self):\n return self._paths", "def list_dir(self, path):", "def path_entries(self):", "def getPaths(self):\n return self.pathTuple", "def list_path(self, path):\n return LocalResources(\"\").list_path(path)", "def get_object_list(self, url):\n path = self.base_path / url\n return [\n os.fspath((Path(dirpath) / filename).relative_to(path))\n for dirpath, _, files in os.walk(path)\n for filename in files\n if filename != path\n ]", "def GetPaths(self):\n return self.paths", "def _get_paths():\n paths = [\n '/'\n ]\n return paths", "def paths(self):\n return list(zip(*self.collected))[0]", "def getPathItems(self):\n paths = []\n for index in range(self._editor.count()):\n paths.append(self._editor.itemText(index))\n return paths", "def filepaths(self):\n pass", "def get_paths(self):\n return (self.world_fpath, self.subj_fpath, self.peds_fpath)", "def local_paths(self) -> List[Path]:\n return self._local_paths", "def get_paths(self):\n paths = []\n for f in dir(self):\n o = getattr(self, f)\n if callable(o) and hasattr(o, '_path'):\n paths.append(getattr(o, '_path'))\n return paths", "def paths_list(ctx):\n for path in ctx.obj['CLIENT'].paths.list():\n if not path.source.name:\n cidr_blocks = [subnetwork.cidr_block for subnetwork in path.source.subnetworks]\n source_name = \",\".join(cidr_blocks)\n network_name = \"external\"\n else:\n source_name = path.source.name\n network_name = path.source.network.name\n click.echo(\"%s:%s -(%s)-> %s:%s\" % (network_name, source_name, path.port,\n path.network.name, path.destination.name))", "def ls(self, glb='*', limit=0):\n for a in self.ls_names(glb, limit=limit):\n yield Path(a)", "def get_path(self) :\n path = [self]\n s = self.get_parent()\n while s is not None :\n path.append(s)\n s = s.get_parent()\n path.reverse()\n return path", "def paths(self) -> Paths:\n return self._paths", "def get_path_list(self, suffix=img_type):\n img_list = list(filter(lambda x: x.endswith(suffix), self.path_list))\n return img_list", "def get_paths(self, depth=None):\n if not isinstance(self.ref_cell, Cell):\n return []\n if self.origin is not None:\n trans = numpy.array(self.origin)\n else:\n trans = None\n if self.rotation is not None:\n rot = self.rotation * numpy.pi / 180.0\n else:\n rot = None\n return [\n p.transform(trans, rot, self.magnification, self.x_reflection)\n for p in self.ref_cell.get_paths(depth=depth)\n ]", "def getSelectedPaths(self):\n\t\t\n\t\tobjs = self.getSelectedDataUnits()\n\t\treturn [self.dataUnitToPath[x] for x in objs]" ]
[ "0.6900828", "0.6837692", "0.6773243", "0.67213434", "0.6628229", "0.65846545", "0.6583305", "0.65332806", "0.64887327", "0.64887327", "0.64698523", "0.6450161", "0.64455634", "0.64354306", "0.642966", "0.6352989", "0.63092613", "0.6308861", "0.6287949", "0.6274879", "0.62295985", "0.6225884", "0.62081116", "0.61692256", "0.6166501", "0.6110744", "0.61099637", "0.6107075", "0.60869056", "0.6085081" ]
0.7038104
0
The string for DEVNAME used by kernel in uevent.
def kdev_name(self):
    return self._sysfs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_device_name(self):\n name = self._device[\"name\"]\n if not name or name == \"--\":\n name = self._mac\n\n return name", "def name(self):\n return self.devname", "def name(self):\n return f\"{get_device_name(self._data, 0, self._name)}\"", "def device_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"device_name\")", "def device_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"device_name\")", "def device_name(self) -> Optional[str]:\n return pulumi.get(self, \"device_name\")", "def device_name(self) -> Optional[str]:\n return pulumi.get(self, \"device_name\")", "def getDeviceName(self):\n name = str(nvmlDeviceGetName(self.handle))\n return name", "def get_dev_name(self):\n\t\treturn call_sdk_function('PrlSrvCfgHdd_GetDevName', self.handle)", "def name(self):\n return self._device.name", "def name(self):\n return self._device.name", "def name(self):\n return self._device.name", "def name(self):\n return self.device.name()", "def name(self) -> str:\n return self._device.name or self._device.mac", "def dev_name_prefix(self):\n match = self._name_re.match(self.dev_name)\n if not match:\n raise InvalidDeviceNameError(\"Not a valid device name: '%s'\" %\n self.dev_name)\n\n return match.group(1)", "def name(self) -> str:\n return self.dev.label", "def name(self):\n return self._device.description_pretty()", "def find_iio_device_name(self):\n self.iio_device_dir()\n self.console.runcmd(f\"cat name\", expected=\"\\r\\n\")\n iio_device_name = self.console.output()\n return iio_device_name", "def name(self):\n return f\"{self.device_name} {self.device_variable}\"", "def name(self) -> str:\n return self.config_name or self.host_name or self.dev_id or DEVICE_DEFAULT_NAME", "def get_dev_path(name):\n return '/dev/' + name.replace('!', '/')", "def name(self):\n return self.device.device_data[self.device_id]['name']", "def name(self):\n return self._device.device_data[self._uuid]['name']", "def name(self):\n return self._get_device_class_name()", "def device_object_name(self):\n try:\n return self.get_property(gdef.SPDRP_PHYSICAL_DEVICE_OBJECT_NAME)\n except WindowsError as e:\n if e.winerror not in (gdef.ERROR_INVALID_DATA, gdef.ERROR_NO_SUCH_DEVINST):\n raise", "def name(self):\n _LOGGER.debug(self._shelly_cloud_device_name + ' >>> ' +\n self._shelly_cloud_entity_name + ' >>> name() >>> ' +\n self._shelly_cloud_device_name)\n return self._shelly_cloud_device_name", "def get_device(self) -> str:\n pass", "def name(self):\n if self._connection.location_names:\n return '{} {} {}'.format(self._device.location2, self._device.location, self._device.name)\n else:\n return self._device.name", "def getParentDeviceName(self):\n name = \"\"\n dev = self.device()\n if dev: name = dev.getDeviceName()\n return name", "def dev_name(self):\n if not self.is_rule:\n raise NotRuleError(\"No 'NAME' field.\")\n\n return self._fields[\"NAME\"]" ]
[ "0.7653964", "0.7651574", "0.7606604", "0.75387293", "0.75387293", "0.7465343", "0.7465343", "0.7457964", "0.743613", "0.73530847", "0.73530847", "0.73530847", "0.727464", "0.7246561", "0.7210606", "0.7110714", "0.7101008", "0.70763487", "0.70535266", "0.7051693", "0.7044535", "0.70224977", "0.6986159", "0.6938774", "0.6860222", "0.68584776", "0.6857089", "0.6798473", "0.6788211", "0.67816013" ]
0.77354324
0
Returns n points evenly spaced along the perimeter of a circle of diameter d centered at the origin; if type = 'int', the coordinates are rounded to the nearest integer
def perimeter_points(d,n,type = 'int'):
    rimpointsx = np.sin(np.linspace(0,2*np.pi,num=n,endpoint = False)) + 1
    rimpointsy = np.cos(np.linspace(0,2*np.pi,num=n,endpoint = False)) + 1
    rimpoints = (((d-1)/2))*np.array([rimpointsy,rimpointsx])
    if type == 'int':
        rimpoints = np.round(rimpoints)
        rimpoints = rimpoints.astype(int)
    return rimpoints
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self, n):\n perimeter = 2 * math.pi\n return Point(math.cos(n / perimeter), math.sin(n / perimeter))", "def discretized_circle(radius, n_pts):\n x1 = np.zeros(n_pts)\n y1 = np.zeros(n_pts)\n for i in range(0, n_pts):\n x1[i] = np.cos(2 * np.pi / n_pts * i) * radius\n y1[i] = np.sin(2 * np.pi / n_pts * i) * radius\n\n x2 = np.roll(x1, -1)\n y2 = np.roll(y1, -1)\n return x1, y1, x2, y2", "def points_on_circumference(center=(0, 0), r=50, n=100):\n\treturn [\n (\n center[0]+(cos(2 * pi / n * x) * r), \n center[1] + (sin(2 * pi / n * x) * r) \n\n ) for x in range(0, n + 1)]", "def regular_polygon_perimeter_equivalent_radius(n, radius=1.0):\n\n theta = 2 * np.pi / n\n\n r = (theta * radius) / (2 * np.sin(theta / 2.0))\n return r", "def points_on_circumference_with_per(center=(0, 0), r=50, n=100, per = 50):\n\n\t# circum_cnt is actual points on cicumference as a percentage of total \n\t# random points(n) = Percentage_of_Total_Points * n / 100\n\tcircum_cnt = int(per*n/100)\n\n\t# random_cnt is points inside the circle = Total random points - Points on Circum\n\trandom_cnt = n - circum_cnt\n\n\t# Append points on circumference\n\tfinal_pts = [\n\t\t(\n\t\t\tcenter[0]+(cos(2 * pi / circum_cnt * x) * r), \n\t\t\tcenter[1] + (sin(2 * pi / circum_cnt * x) * r) \n\t\t) for x in range(0, circum_cnt + 1)]\n\n\n\n\n\t# Generate random points inside circle\n\t# random points inside circle should have atleast 5 radius to be visible enough\n\tfor i in range(1,random_cnt+1):\n\t\tfinal_pts.append( (center[0]+ cos(2 * pi / circum_cnt * i) * random.randint(1,r-20),\n\t\t\t\t\t\t\tcenter[1] + sin(2 * pi / circum_cnt * i) * random.randint(1,r-20)))\n\n\n\treturn final_pts", "def create_circle_points(n):\n return [math.cos(2 * math.pi * i / float(n)) + \\\n 1j * math.sin(2 * math.pi * i / float(n)) for i in range(n)]", "def incircle(size=None):\n if size is None:\n size = ()\n else:\n try:\n size = tuple(size)\n except TypeError:\n size = (size,)\n n = int(prod(size))\n if n < 330:\n # For small n, interpreted overhead dominates. Using sin and cos\n # results in fewer interpreted instructions than rejection method.\n # Compiled code should never use this algorithm.\n t, z = random((2,) + size + (1,))\n t *= 2. 
* pi\n return sqrt(z) * concatenate((cos(t), sin(t)), axis=-1)\n # Beats this slightly:\n # xy = standard_normal(size + (2,))\n # return xy * expm1(-0.5 * (xy*xy).sum(axis=-1, keepdims=True))\n # For large n, higher intrinsic cost of sin and cos compared to\n # rejection method dominates, and it is worth taking a few more\n # interpreted instructions to benefit from the superior algorithm.\n nmore = n\n p = []\n fac = 4./pi # 1/prob random point in unit circle\n while nmore > 0: # Odds of needing another pass < 0.0001.\n m = int((nmore + 5.*sqrt(nmore))*fac)\n q = 2.*random((m, 2)) - 1.\n q = q[(q * q).sum(axis=-1) < 1., :]\n p.append(q)\n nmore -= len(q)\n return concatenate(p)[:n].reshape(size + (2,))", "def CirclePoints(center,radius,num_points=10):\n t=np.linspace(0., 2.*np.pi, num_points, endpoint = False)\n # define points\n points=[(center[0]+radius*np.cos(angle),center[1]+\n radius*np.sin(angle)) for angle in t]\n return points", "def num_points_in_circle(d):\n return 6 * d if d > 0 else 1", "def circumscribed_polygon_radius(n, radius=1.0):\n\n theta = 2 * np.pi / n\n radius_out = radius / np.cos(theta / 2)\n\n return radius_out", "def getPointsInCircum(r, n=100, h=0, k=0):\n\n points = [(np.cos(2*np.pi/n*x)*r, np.sin(2*np.pi/n*x)*r) for x in range(0, n+1)]\n x, y = list(zip(*points))\n x = np.array(x)\n y = np.array(y)\n x += h\n y += k\n return (x, y)", "def getCircleDiameter(self):\n segments = []\n for (i, p1) in enumerate(self.points):\n for p2 in self.points[i+1:]:\n segments.append(Segment(p1, p2))\n s = max(segments, key=lambda s: s.length)\n return Circle(*s.middle, radius=s.length/2)", "def perimeter(points):\n return sum(get_distances(points))", "def circle():\n xmin=0\n xmax=6.5\n ymin=0.\n ymax=6.5\n\n x = arange(xmin, xmax, 0.005)\n y = x*1.\n [xx, yy] = meshgrid(x, y)\n\n zz=sqrt((xx-3.2475)**2.+(yy-3.2475)**2.)\n zz2=zz*1.\n zz2[(zz <= 3.25)]=1.\n zz2[(zz <= 3.25*0.2)]=0.\n zz2[(zz > 3.25)]=0.\n zz3=zeros(numpy.array(numpy.shape(zz2))/10)\n for i in arange(len(xx)/10):\n for j in arange(len(yy)/10):\n zz3[i,j]=numpy.sum(zz2[(i*10):(i*10+10),(j*10):(j*10+10)])/100.\n\n return zz3", "def estimate_pi(n):\n points_out = 0\n points_in = 0\n for i in range(n):\n x = random.uniform(0,1)\n y = random.uniform(0,1)\n if math.sqrt(x**2 + y**2) <= 1:\n points_in += 1\n else:\n points_out += 1\n est_pi = (points_in / (points_out + points_in)) * 4\n return est_pi", "def perimeter(a:float, b:float, c:float):\n return a + b + c", "def circle(n=5000, r=1, noise=0.05):\n phis = 2 * np.pi * np.random.rand(n)\n x = [[r * np.sin(phi), r * np.cos(phi)] for phi in phis]\n x = np.array(x)\n x = x + noise * np.random.randn(n, 2)\n return x", "def pentagon(n) -> int:\n\n return (n * (3 * n - 1)) // 2", "def latticepoints(circle_radius, pixel_size):\n\n numlatticepoints = 0\n npixels = int(circle_radius/float(pixel_size))\n for i in range(-npixels, npixels+1, 1):\n for j in range(-npixels, npixels+1, 1):\n if ((i*pixel_size)**2 + (j*pixel_size)**2) <= (np.sqrt(2.*float(npixels*pixel_size)**2))**2:\n #if ((m*pixel_size)**2 + (n*pixel_size)**2) <= npixels**2:\n numlatticepoints = numlatticepoints + 1\n\n return numlatticepoints", "def circle(center, perp_vect, radius, element_number=10):\n # tl = [0, 0.2, 0.4, 0.6, 0.8]\n tl = np.linspace(0, 1, element_number)\n\n # vector form center to edge of circle\n # u is a unit vector from the centre of the circle to any point on the\n # circumference\n\n # normalized perpendicular vector\n n = perp_vect / np.linalg.norm(perp_vect)\n\n # normalized 
vector from the centre to point on the circumference\n u = perpendicular_vector(n)\n u /= np.linalg.norm(u)\n\n pts = []\n\n for t in tl:\n # u = np.array([0, 1, 0])\n # n = np.array([1, 0, 0])\n pt = (\n radius * np.cos(t * 2 * np.pi) * u\n + radius * np.sin(t * 2 * np.pi) * np.cross(u, n)\n + center\n )\n\n pt = pt.tolist()\n pts.append(pt)\n\n return pts", "def get_circle_radius(self, point, center):\n x, y, z = point[:]\n x0, y0, z0 = center[:]\n return math.sqrt((x-x0)**2 + (y-y0)**2 + (z-z0)**2)", "def circleArea(radius):\n return math.pi * radius * radius", "def _generate_circle(self, center, radius):\n assert len(center) in [2, 3], 'Center of circle must have 2 or 3 elements'\n assert radius > 0, 'Radius must be greater than zero'\n return Point(*center).buffer(radius)", "def pentagonal(n: int) -> int:\n return int(n * (3 * n - 1) / 2)", "def test_circular_scatter():\n area = [0, 1000, 0, 1000]\n size = 1000\n x, y = gridder.circular_scatter(area, size, random=False)\n distances = np.sqrt((x[1:] - x[:-1])**2 + (y[1:] - y[:-1])**2)\n npt.assert_allclose(distances, distances[0]*np.ones(size-1), rtol=1e-09)", "def perimeter(self):\r\n\r\n return 2*math.pi*self.__radius", "def get_circle_coords(self, radius, divider, count,center_x, center_y):\n\n angle_deg = (360/divider)*count\n angle = radians(angle_deg-(90 + (360/divider)))\n x = radius*cos(angle) + center_x;\n y = radius*sin(angle) + center_y;\n return (int(x), int(y))", "def inside_circle(total_count):\n\n x = np.float32(np.random.uniform(size=total_count))\n y = np.float32(np.random.uniform(size=total_count))\n\n radii = ##\n\n count = ##\n\n return count", "def circle_area(radius):\n return math.pi * radius ** 2", "def regular_polygon_area_equivalent_radius(n, radius=1.0):\n\n theta = 2 * np.pi / n\n\n r = np.sqrt((theta * radius ** 2) / np.sin(theta))\n return r" ]
[ "0.73338", "0.7235277", "0.70930624", "0.697346", "0.69107336", "0.6859915", "0.6700372", "0.6552774", "0.64891666", "0.6395865", "0.63475925", "0.6329089", "0.6323116", "0.6279683", "0.6203975", "0.61838937", "0.61728275", "0.617129", "0.6138173", "0.6136083", "0.6120699", "0.6119179", "0.61160463", "0.6100174", "0.607308", "0.6053537", "0.6038392", "0.60314405", "0.60274756", "0.60253906" ]
0.81103927
0
Outputs the matrix with which to adjust the gradient after adding the line between p1 and p2
def line_contribution(p1,p2,alpha = 1):
    adjust = np.zeros((worksize,worksize,2))
    x1 = p1[0]
    y1 = p1[1]
    x2 = p2[0]
    y2 = p2[1]
    coordinates = coordinate_matrix(worksize)
    numerator = np.sum(np.multiply(coordinates,np.reshape(np.array(((y2-y1,-(x2-x1)))),(2,1,1))),axis = 0) + x2*y1 - y2*x1
    dist_from_line = np.abs(numerator) * (1.0/np.sqrt((y2-y1)**2+(x2-x1)**2))
    xcontribution = (x2-x1)*(1/(alpha*dist_from_line+1))
    ycontribution = (y2-y1)*(1/(alpha*dist_from_line+1))
    return np.array((-ycontribution,xcontribution))/np.sqrt((y2-y1)**2+(x2-x1)**2)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_gradient(p1, p2):\n # Ensure that the line is not vertical\n if p1[0] == p2[0]:\n return None\n m = (p1[1] - p2[1]) / (p1[0] - p2[0])\n return m", "def calc_gradu_gradv_p1_partly(topo,x,y):\n ndofs = max(x.shape)\n\n (rows,cols)= la_utils.get_sparsity_pattern(topo)\n\n values = np.zeros(rows.shape)\n\n for row in topo:\n x_l = x[row]\n y_l = y[row]\n eval_points = np.zeros((0,2))\n\n (phi_dx,phi_dy,phi,omega) = basis.tri_p1(x_l,y_l,eval_points)\n dx_j = phi_dx\n dx_i = phi_dx.transpose()\n dy_j = phi_dy\n dy_i = phi_dy.transpose()\n local_matrix = omega*(np.dot(dx_i,dx_j)+np.dot(dy_i,dy_j))\n values = la_utils.add_local_to_global_coo(rows,cols,values,\n row,row,local_matrix)\n\n A = sparse.coo_matrix((values,(rows,cols)),shape=(ndofs,ndofs))\n #plt.spy(A)\n #plt.show()\n A.tocsr()\n\n return A", "def calculate_gradients(self, X, Y):\n Z1 = np.matmul(self.weights[0], X) + self.biases[0] #(30, m)\n A1 = sigmoid(Z1) #(30, m)\n Z2 = np.matmul(self.weights[1], A1) + self.biases[1] #(10, m)\n A2 = sigmoid(Z2) #(10, m)\n # number of examples\n m = X.shape[1]\n dZ2 = A2 - Y #(784, m)\n dW2 = (1 / m) * np.matmul(dZ2, A1.T) #(10, 30)\n db2 = (1 / m) * np.sum(dZ2, axis = 1, keepdims = True) #(10, 1)\n dZ1 = np.multiply(np.matmul(self.weights[1].T, dZ2), sigmoid_deri(Z1)) #(30, m)\n dW1 = (1 / m) * np.matmul(dZ1, X.T) #(30, 784)\n db1 = (1 / m) * np.sum(dZ1, axis = 1, keepdims = True) #(30, 1)\n \n grads = {\"dW1\":dW1, \"db1\":db1, \"dW2\":dW2, \"db2\":db2} \n return grads", "def lineloss(endpoints,gradient):\n l = discrete_line(endpoints[0],endpoints[1])\n direction = endpoints[1]-endpoints[0]\n dperp = np.array((-direction[1],direction[0])) #Perpendicular vector to the direction\n dperp = dperp/np.linalg.norm(dperp)\n lpoints = gradient[:,l[0],l[1]]\n\n return -np.sum(np.abs(np.dot(dperp,lpoints)))", "def generate_line(point_1, point_2):\r\n A = point_1.y - point_2.y\r\n B = point_2.x - point_1.x\r\n C = point_1.y * B + point_1.x * A\r\n return np.matrix([[A],[B],[-C]])", "def backprop(self, x, y):\n \n ### YOUR CODE HERE\n nabla_output = np.zeros(self.params['nh2'])\n del1 = self.aout - y\n nabla_output = del1 * self.ahidden2\n #print (nabla_output)\n del2 = np.zeros(self.params['nh2'])\n for i in range(0, self.params['nh2']):\n del2[i] = self.w_output[i] * del1 * self.ahidden2[i] * (1-self.ahidden2[i])\n # del2 = np.multiply(a1,a2)\n # nabla_input = np.multiply(del2, x)\n nabla_middle = np.zeros([self.params['nh1'],self.params['nh2']])\n for i in range (0, self.params['nh1']):\n for j in range(0, self.params['nh2']):\n #nabla_middle[i][j] = self.ahidden2[j] * del2[j]\n nabla_middle[i][j] = self.ahidden1[i] * del2[j]\n \n #nabla\n del3 = np.zeros(self.params['nh1'])\n # del3 = np.dot(self.w_middle, del2)\n for i in range(0, self.params['nh1']):\n del3[i] = np.dot(self.w_middle[i], del2) * self.ahidden1[i] * (1-self.ahidden1[i])\n # del2 = np.multiply(a1,a2)\n # nabla_input = np.multiply(del2, x)\n nabla_input = np.zeros([x.shape[0],self.params['nh1']])\n for i in range (0, x.shape[0]):\n for j in range(0, self.params['nh1']):\n #nabla_input[i][j] = self.ahidden1[j]*del3[j]\n nabla_input[i][j] = x[i]*del3[j]\n \n \n #nabla_input = np.dot(x, np.transpose(del2))\n ### END YOUR CODE\n \n assert nabla_input.shape == self.w_input.shape\n assert nabla_output.shape == self.w_output.shape\n return (nabla_input, nabla_middle, nabla_output)", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n hahaha233 = MatMulOp()\r\n return [ hahaha233( output_grad, 
node.inputs[1], False , True) , hahaha233( node.inputs[0] , output_grad , True , False ) ]\r\n #return [output_grad * node.inputs[1] , output_grad * node.inputs[0] ]\r", "def backprop(self, a1, a2, a3, z2, y_enc, w1, w2):\n #backpropagate our error\n sigma3 = a3 - y_enc\n z2 = self.add_bias_unit(z2, column=False)\n sigma2 = w2.T.dot(sigma3) * self.tanh(z2, deriv=True)\n #get rid of the bias row\n sigma2 = sigma2[1:, :]\n grad1 = sigma2.dot(a1)\n grad2 = sigma3.dot(a2.T)\n # add the regularization term\n grad1[:, 1:]+= (w1[:, 1:]*self.l2) # derivative of .5*l2*w1^2\n grad2[:, 1:]+= (w2[:, 1:]*self.l2) # derivative of .5*l2*w2^2\n return grad1, grad2", "def trace_gradient(self,p,direction='upgradient',stepsize=1,well_snap_distance = 1):\r\n \r\n if not direction == 'upgradient' and not direction == 'downgradient':\r\n raise Exception(\"direction must be either 'upgradient' or 'downgradient'.\")\r\n \r\n import scipy.spatial\r\n import numpy as np\r\n import matplotlib.pyplot as plt\r\n import shapely.geometry\r\n \r\n ring = np.column_stack((\r\n np.cos(np.linspace(0,2*np.pi,361)),\r\n np.sin(np.linspace(0,2*np.pi,361)) )) \r\n ring *= self.domain_radius\r\n ring += np.asarray([np.real(self.domain_center),np.imag(self.domain_center)])\r\n \r\n # First, find all elements which could be stoppers\r\n stoppers = []\r\n stoppers.append(shapely.geometry.LineString(ring))\r\n for e in self.elementlist:\r\n \r\n if isinstance(e, ElementHeadBoundary):\r\n # Head Boundaries are valid end points\r\n stoppers.append(shapely.geometry.LineString(e.line[:,:2]))\r\n \r\n if isinstance(e, ElementWell):\r\n # Wells are valid end points\r\n stoppers.append(shapely.geometry.Point(np.asarray([np.real(e.zc),np.imag(e.zc)])))\r\n \r\n if isinstance(e, ElementLineSink):\r\n # Line Sinks are valid end points\r\n stoppers.append(shapely.geometry.LineString(e.line[:,:2]))\r\n \r\n if isinstance(e, ElementNoFlowBoundary):\r\n # No-flow Boundaries are valid end points\r\n stoppers.append(shapely.geometry.LineString(e.line[:,:2]))\r\n \r\n def gradient(p1,p2,p3,z1,z2,z3):\r\n \r\n area = abs((p1[0]*(p2[1]-p3[1])+p2[0]*(p3[1]-p1[1])+p3[0]*(p1[1]-p2[1]))/2)\r\n \r\n M = np.asarray(\r\n [[p2[1]-p3[1], p3[1]-p1[1], p1[1]-p2[1]],\r\n [p3[0]-p2[0], p1[0]-p3[0], p2[0]-p1[0]]])\r\n \r\n U = np.asarray([z1,z2,z3]).reshape((3,1))\r\n \r\n # Solution based on http://pers.ge.imati.cnr.it/livesu/papers/MLP18/MLP18.pdf Equation 1\r\n return np.dot(M,U)[:,0]/(2*area)\r\n \r\n # Check if the start point is complex, if yes, turn it into a real vector\r\n if np.iscomplex(p).any():\r\n p = np.asarray([np.real(p),np.imag(p)])\r\n \r\n # Depending on the direction, add a gradient\r\n if direction == 'upgradient':\r\n stepsize = stepsize\r\n else:\r\n stepsize = -stepsize\r\n \r\n # Set the repeater boolean to True\r\n repeater = True\r\n \r\n \r\n \r\n # Re-arrange the starting point into an array\r\n points = np.asarray(p).copy().reshape((1,2))\r\n # \"\"\"\r\n # Get three points \r\n testpoints = np.asarray([\r\n points[-1,0] + 1j*points[-1,1],\r\n points[-1,0] + stepsize/100 + 1j*points[-1,1],\r\n points[-1,0] + 1j*points[-1,1] + 1j*stepsize/100])\r\n \r\n testpoints = np.real(self.evaluate(testpoints,mode='head'))\r\n \r\n grad = np.asarray([\r\n testpoints[1]-testpoints[0],\r\n testpoints[2]-testpoints[0]])/stepsize*100\r\n grad = grad/np.linalg.norm(grad)\r\n # \"\"\"\r\n \r\n # grad = self.evaluate(\r\n # z = points,\r\n # mode = 'gradient',\r\n # derivatives = 'phi')\r\n # # grad = np.asarray([np.real(grad), np.imag(grad)])\r\n # 
grad = grad/np.linalg.norm(grad)\r\n \r\n # And save the result to the points array\r\n points = np.row_stack((\r\n points.copy(),\r\n points + grad*stepsize))\r\n \r\n # Now start the while loop, trace until the end\r\n while repeater:\r\n \r\n # The last point in the array is the starting point\r\n p = points[-1,:]\r\n \r\n # \"\"\"\r\n testpoints = np.asarray([\r\n points[-1,0] + 1j*points[-1,1],\r\n points[-1,0] + stepsize/100 + 1j*points[-1,1],\r\n points[-1,0] + 1j*points[-1,1] + 1j*stepsize/100])\r\n \r\n testpoints = np.real(self.evaluate(testpoints,mode='head'))\r\n \r\n grad = np.asarray([\r\n testpoints[1]-testpoints[0],\r\n testpoints[2]-testpoints[0]])/stepsize*100\r\n \r\n grad = grad/np.linalg.norm(grad)\r\n # \"\"\"\r\n \r\n # grad = self.evaluate(\r\n # z = points[-1,:],\r\n # mode = 'gradient',\r\n # derivatives = 'phi')\r\n # # grad = np.asarray([np.real(grad), np.imag(grad)])\r\n # grad = grad/np.linalg.norm(grad)\r\n \r\n # And append the next step to the list\r\n points = np.row_stack((\r\n points,\r\n points[-1,:] + grad*stepsize))\r\n \r\n \r\n line = shapely.geometry.LineString(points[-2:,:])\r\n \r\n # Check for stopping elements\r\n for stop in stoppers:\r\n \r\n # If this stopper is a well, check for distance\r\n if stop.type == 'Point':\r\n point = shapely.geometry.Point(points[-1,:])\r\n if point.distance(stop) <= well_snap_distance:\r\n points[-1,:] = np.asarray(point.xy)[:,0]\r\n repeater = False\r\n \r\n # Else, we can check for intersection\r\n else:\r\n if line.intersects(stop):\r\n \r\n if line.intersection(stop).type == 'Point':\r\n \r\n points[-1,:] = np.asarray(line.intersection(stop).xy)[:,0]\r\n repeater = False\r\n \r\n else:\r\n \r\n print(type(line.intersection(stop)))\r\n print((type(line.intersection(stop)) == 'Point'))\r\n \r\n points[-1,:] = np.asarray(line.intersection(stop)[0].xy)[:,0]\r\n repeater = False\r\n\r\n# # Check for oscillation\r\n# p2p = points[-3,:]-points[-2,:]\r\n# p1p = points[-2,:]-points[-1,:]\r\n# if np.inner(p1p,p2p) < 0: \r\n# # The trace direction has change by more than 90 degrees, i.e.\r\n# # turned back; stop iterating\r\n# points = points[:-1,:]\r\n# repeater = False\r\n \r\n return points", "def _UpdateGradient(self):\n self.mol.GetGradient('analytic')", "def gradient(self, node, output_grad):\r\n return [auto_sum_op(output_grad, get_shape_op(node.inputs[0]) ), 0-auto_sum_op(output_grad, get_shape_op(node.inputs[1]) )]\r\n #return [auto_sum_op(output_grad, ), 0-output_grad]\r", "def gradient_merge_arrays(cls, image_one, image_two):\n if image_one.shape != image_two.shape:\n raise AttributeError(\"shapes do not match: {} vs {}\".format(image_one.shape, image_two.shape))\n height = image_one.shape[0]\n vector_one = numpy.array([1.0 - float(i + 1) / (height + 1) for i in range(height)])\n vector_two = numpy.array([float(i + 1) / (height + 1) for i in range(height)])\n return (image_one * vector_one[:, numpy.newaxis]) + (image_two * vector_two[:, numpy.newaxis])", "def Map_Gradients(post_eval,q,InvV,m_points):\n m = InvV.n\n N = m_points.num\n d = InvV.d\n \n ds_dq = np.zeros([m,N])\n dr_dq = np.zeros([m,N])\n \n ds_db = np.zeros([m,d,N])\n dr_db = np.zeros([m,d,N])\n \n ds_dL = np.zeros([m,d,d,N])\n dr_dL = np.zeros([m,d,d,N])\n \n dB_dL = np.zeros([m,d,d,d,N])\n dM_dL = Cholesky_Derivs(InvV,m_points)\n Q = Partitioner(q, InvV, post_eval, m_points)\n \n for j in range(m):\n backtrack = m_points.map(InvV,j)\n ds_dq[j,:] = - Q[j,:] / q[j]\n dr_dq[j,:] = ds_dq[j,:] - np.mean(ds_dq[j,:])\n \n for k in range(d):\n 
ds_db[j,k,:] = Q[j,:] * backtrack.all[:,k].T\n dr_db[j,k,:] = ds_db[j,k,:] - np.mean(ds_db[j,k,:])\n \n for l in range(d):\n for i in range(N):\n for row in range(d):\n for col in range(d):\n dB_dL[j,row,k,l,i] += m_points.pick(i)[col] * dM_dL[j,row,col,k,l]\n ds_dL[j,k,l,i] = Q[j,i] * np.inner(backtrack.pick(i),dB_dL[j,:,k,l,i])\n if k == l:\n ds_dL[j,k,l,:] += (2/InvV.L[j,k,l])\n \n dr_dL[j,k,l,:] = ds_dL[j,k,l,:] - np.mean(ds_dL[j,k,l,:])\n \n return dr_dq, dr_db, dr_dL", "def gradient(self, node, output_grad):\r\n return [conv2d_grad_op1(node.inputs[0], node.inputs[1], node.const_attr , output_grad),conv2d_grad_op2(node.inputs[0], node.inputs[1], node.const_attr , output_grad)]", "def back_propagation(params, x, y_map, hyper_p=0):\n\n m = len(x)\n\n theta1, theta2 = unroll_params(params)\n d1 = np.zeros_like(theta1)\n d2 = np.zeros_like(theta2)\n\n history = feed_forward([theta1, theta2], x)\n a3 = history[1][1]\n a2 = history[0][1]\n z2 = history[0][0]\n a1 = np.insert(x, 0, 1, axis=1)\n\n delta3 = a3 - y_map\n delta2 = (theta2.T @ delta3)[1:] * sig_gradient(z2)\n\n d1 = (d1 + delta2 @ a1) / m\n d2[:, 1:] = (d2[:, :1] + delta3 @ a2.T) / m\n\n d1[:, 1:] = d1[:, 1:] + (hyper_p / m) * theta1[:, 1:]\n d2[:, 1:] = d2[:, 1:] + (hyper_p / m) * theta2[:, 1:]\n\n return np.concatenate([d1.ravel(), d2.ravel()])", "def gradient(self, node, output_grad):\r\n return [auto_sum_op(output_grad, get_shape_op(node.inputs[0])) , zeroslike_op(node.inputs[1])]\r\n #assert True\r", "def gradient(self, x):\n pass", "def calculate_parameter_gradients(logger, params_1, params_2):\n logger.debug(\"Shape of model_1_parameters: {}\".format(str(len(params_1))))\n logger.debug(\"Shape of model_2_parameters: {}\".format(str(len(params_2))))\n\n return numpy.array([x for x in numpy.subtract(params_1, params_2)])", "def gradient(self, node, output_grad):\r\n \"\"\"TODO: Your code here\"\"\"\r\n return [auto_sum_op(output_grad / node.inputs[1] ,get_shape_op(node.inputs[0])), auto_sum_op(-output_grad * node.inputs[0] / node.inputs[1] / node.inputs[1] , get_shape_op(node.inputs[1]) ) ]", "def backPropagate(self):\n\n # application of the chain rule to find derivative of the loss function with respect to weights2 and weights1\n d_weights2 = np.dot(self.layer1.T, (2*(self.y - self.output) * sigmoid_derivative(self.output)))\n d_weights1 = np.dot(self.input.T, (np.dot(2*(self.y - self.output) * sigmoid_derivative(self.output), self.weights2.T) * sigmoid_derivative(self.layer1)))\n\n # update the weights with the derivative (slope) of the loss function\n self.weights1 += d_weights1\n self.weights2 += d_weights2", "def gradient(self, node, output_grad):\r\n return [auto_broadcast_op(output_grad, get_shape_op(node.inputs[0])) , zeroslike_op(node.inputs[1])]\r\n #assert True\r", "def gradient(self, inputs):\n raise NotImplementedError", "def gradient(self, X, V, W, Y):\n one, d_plus_one = X.shape\n K, H_plus_one = W.shape\n d = d_plus_one - 1\n H = H_plus_one - 1\n\n Z, Yhat = self.forward(X, V, W)\n assert one == 1\n x = X\n y = Y\n z = Z.ravel()\n yhat = Yhat.ravel()\n\n # Update W\n # grad__L__yhat = (yhat - y) / np.clip(yhat * (1 - yhat), EPSILON, inf)\n # grad__L__z[:] = 0.0\n # for k in range(K):\n # grad__yhat_k__W_k = z * yhat[k] * (1 - yhat[k])\n # # Last element corresponds to constant offset 1 appended to z\n # # vector; it does not change / has no derivative.\n # grad__yhat_k__z = W[k, :-1] * yhat[k] * (1 - yhat[k])\n # grad__L__z += grad__L__yhat[k] * grad__yhat_k__z\n # W[k, :] -= self.learning_rate * 
grad__L__yhat[k] * grad__yhat_k__W_k\n grad__L__z = (W.T * (yhat - y)).sum(axis=1)\n zz = z.reshape((1, H + 1)).repeat(K, 0)\n grad__L__W = diag(yhat - y) @ zz\n\n # Update V\n # for h in range(H):\n # grad__z_h__V_h = x * (1 - z[h] ** 2)\n # grad__L__V_h = grad__L__z[h] * grad__z_h__V_h\n # V[h, :] -= self.learning_rate * grad__L__V_h\n xx = x.reshape((1, d + 1)).repeat(H + 1, 0)\n grad__L__V = diag((1 - z ** 2) * grad__L__z) @ xx\n\n return grad__L__V, grad__L__W", "def grad_loss_wrt_w(self, x, y):\n (N, D) = x.shape\n k1 = np.matmul(x, np.transpose(self.w)) + self.b\n y1 = y.reshape((N,1))\n dr = (1 + np.exp(1 * y1 * k1))\n nr = -y1 * x\n c1 = nr/dr\n #(N1,D1) = self.w.shape\n #c2 = np.zeros((N1,D1))\n #for i in range(N):\n # c2[i-1] = c1[i-1,:] + c1[i,:]\n #l_w = c2/N\n l_w1 = np.mean(c1,axis=0)\n return l_w1\n\n\n #raise NotImplementedError", "def gradient_step(self):\n n = 3 #Granularity of line search\n grad = self.gradient()\n #grad = grad/np.linalg.norm(grad, 2)\n W = project(self.W[-1] + grad)\n A = np.linspace(0., 1., n+2)[1:-1]\n Objective = map(self, [(1. - a)*self.W[-1] + a*W for a in A])\n a = A[np.argmax(Objective)]\n W = (1. - a)*self.W[-1] + a*W\n obj = np.max(Objective)\n self.objective.append(obj)\n self.W.append(W)\n self.iterations += 1", "def gradient(self, node, output_grad):\r\n return [ - output_grad]", "def backward(self, X, Y, P, H, lmd):\n G = - (Y - P)\n _, Npts = P.shape\n n_layers = len(self.hidden_units)\n\n gradients_W = []\n gradients_b = []\n\n for i in range(n_layers, -1, -1):\n\n if i == 0:\n grad_W = G @ X.T * (1/Npts) + 2 * lmd * self.W[i]\n grad_b = G @ np.ones((Npts, 1)) * (1/Npts)\n\n else:\n\n h = H[i - 1]\n w = self.W[i]\n grad_W = G @ h.T * (1/Npts) + 2 * lmd * w\n grad_b = G @ np.ones((Npts, 1)) * (1/Npts)\n\n G = w.T @ G\n G = G * np.where(h > 0, 1, 0)\n\n gradients_W.append(grad_W)\n gradients_b.append(grad_b)\n\n return gradients_W, gradients_b", "def gradient(self, node, output_grad):\r\n return [relu_op(output_grad, node.inputs[1]) , zeroslike_op(node.inputs[1])]\r\n #assert True\r", "def gradient(self, node, output_grad):\r\n return [reshape_op(output_grad , get_shape_op(node.inputs[0])), zeroslike_op(node.inputs[1])]", "def FormG():\n for i in range(2):\n for j in range(2):\n G[i, j] = 0.0\n for k in range(2):\n for l in range(2):\n G[i, j] = G[i, j] + P[k, l] * (TT[i, j, k, l] - 0.5 * TT[i, j, k, l])" ]
[ "0.64249635", "0.5964822", "0.58919156", "0.5855966", "0.5814256", "0.57597506", "0.57327527", "0.5700635", "0.5680193", "0.5670194", "0.5667574", "0.56395584", "0.56350654", "0.56335413", "0.5621072", "0.5597152", "0.559362", "0.55905247", "0.55903786", "0.55802053", "0.5572114", "0.5559869", "0.5556893", "0.55544233", "0.554743", "0.55456823", "0.5537265", "0.5537103", "0.5528376", "0.552716" ]
0.59735805
1
`clicked on board | || yes No | | selected from where nothing to do | | | | selection_bar Board no selection | | | is clicked on valid position is clicked on valid position(Rajan) clicked on empty slot | | | (it returns pgn) | | | | | | | yes no yes no | | | | | | place it (if his piece) does move contain 'x' (capture) select piece no yes (if not check) change selection | | (Ask & Discuss) | | yes no is it his piece pass | | | capture and update both pieces state just place piece | | yes no | | select the piece pass
def main_board_maintenance(self,x_cor,y_cor): for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.display.quit() pygame.quit() quit() if event.type == pygame.MOUSEBUTTONDOWN: x_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board') #print(x_adjusted/80,y_adjusted/80) if self.selected_from_selection_bar : #print('inside selection bar selection option') x_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board') temp_game_state = CP.game_data() temp_game_state = copy.deepcopy(self.game_state) data_convert = CP.Conversion_of_postion_name(self.selected_piece,Helping_Class.selection_bar_reverse_mapping[self.selected_piece] ,(x_adjusted,y_adjusted)) temp_game_state.update(data_convert.piece, int(data_convert.i_pos_ani()), int(data_convert.f_pos_ani())) temp_game_state.active_color = not temp_game_state.active_color fen = temp_game_state.generate_fen() board2 = chess.Board(fen=fen) print(board2) print(fen) print('board2.is_check()',board2.is_check()) #now we need to place the piece on board if self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)] == None: #print(self.selected_position) if not board2.is_check(): if self._check_valid_position_(x_adjusted,y_adjusted): self.place_piece_on_board_from_selection_bar(x_adjusted,y_adjusted) #rajan's #print(self.selected_piece) #print(self.selected_position) data_convert = CP.Conversion_of_postion_name(self.selected_piece,self.selected_position ,(x_adjusted,y_adjusted)) self.game_state.update(data_convert.piece, int(data_convert.i_pos_ani()), int(data_convert.f_pos_ani())) self.selected_piece = None self.selected_position = None self.computer_turn =True else: pass #board position is filled then nothing to do else: #if his piece change selection self.selected_from_selection_bar =False self.selected_from_board = True self.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]] self.selected_position = (x_adjusted,y_adjusted) elif self.selected_from_board: #print('inside selection bar board option') x_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board') omega = True if self.selected_position: if self.selected_position == (x_adjusted,y_adjusted): omega = False #print(self.selected_position,(x_adjusted,y_adjusted)) if omega: move = self._check_valid_move_(x_adjusted,y_adjusted) print(move) if omega: if move: self.computer_turn = True #if move contains x then we have update state of captured piece #else just update selected piece #print("correct move") self.capture_piece_update_board_or_place_piece(move,x_adjusted,y_adjusted) else: #select the piece if self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]: self.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]] self.selected_position = (x_adjusted,y_adjusted) self.selected_from_board = True else: x_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board') if self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]: #select the piece if self.whose_move == 'white': if 'W' in self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]: self.selected_piece = 
self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]] self.selected_from_board = True self.selected_position = (x_adjusted,y_adjusted) else: #nothing to do pass elif self.whose_move == 'black': if 'B' in self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]: self.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]] self.selected_from_board = True self.selected_position = (x_adjusted,y_adjusted) else: #nothing to do pass else: #it is none means nothing is their so nothing to do pass else: #print("not_pressed") pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def selection_board_maintenance(self,x_cor,y_cor):\t\t\r\n\t\tfor event in pygame.event.get():\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.display.quit()\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit() \r\n\r\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\t\t\t\t#print(\"mouse is pressed\")\r\n\t\t\t\t#everything begins here\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'selection_bar')\r\n\t\t\t\t#print(who_is_clicked)\r\n\t\t\t\tif (self.selected_from_selection_bar + self.selected_from_board):\r\n\t\t\t\t\t#print(\"inside selected item one\")\r\n\t\t\t\t\tif Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\t\t\t\t\t\t\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_board = False\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position =Helping_Class.selection_bar_reverse_mapping[piece]\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\telse:\r\n\t\t\t\t\t#print(\"nothing is selected\")\r\n\t\t\t\t\t#check if clicked on his piece change then select it\r\n\t\t\t\t\tif Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked):\r\n\r\n\t\t\t\t\t\tif self.pieces[piece].availability:\r\n\t\t\t\t\t\t\tself.selected_from_selection_bar = True\r\n\r\n\t\t\t\t\t\t\t#update \r\n\t\t\t\t\t\t\tself.selected_piece = piece\r\n\t\t\t\t\t\t\tself.selected_position =(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t#print(self.selected_piece,self.selected_position,self.selected_from_selection_bar)\r\n\r\n\t\t\t\t\t\t\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\t\t\r\n\t\t\t\t\r\n\t\t\telse:\r\n\t\t\t\t#color change\r\n\t\t\t\t#who_is_clicked is dummy variable as no click has occurred\r\n\t\t\t\tx_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t y_cor,\r\n\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t\t 'selection_bar')\r\n\r\n\t\t\t\tself.blit_piece = [(x_adjusted,y_adjusted),piece]", "def click(self, event):\n x = self.ptgrid(event.x)\n y = self.ptgrid(event.y)\n \n # x = loc[0]\n # y = loc[1]\n\n # if self.gamestate == self.STATE_TITLE_SCREEN:\n # self.new_board()\n # self.gamestate = FIRST_PLAYER\n\n\n #duplication /!\\\n if (self.board[y][x] == self.EMPTY and self.p2pGame.isReady):\n if(self.p2pGame.playerTurn == 'X' and self.player == 1):\n self.new_move(x, y, self.player)\n\n if self.has_won(self.player):\n self.gamestate = self.STATE_GAME_OVER\n if self.player == 1:\n self.gameover_screen('X Gagne')\n data = \"--W:X\"\n else:\n self.gameover_screen('O Gagne')\n data = \"--W:O\"\n self.p2pGame.playerTurn = 'X'\n self.p2pGame.sendTicTacToeData(text=data)\n\n\n elif self.is_a_draw():\n self.gamestate = self.STATE_GAME_OVER\n self.gameover_screen('Egalité')\n data = \"--D\"\n self.p2pGame.playerTurn = 'X'\n self.p2pGame.sendTicTacToeData(text=data)\n\n else:\n data = \"--X:\"+ str(x) + \":\" + str(y)\n self.p2pGame.playerTurn = 'O'\n self.p2pGame.sendTicTacToeData(text=data)\n # self.gamestate = self.STATE_O_TURN\n #self.launch()\n elif(self.p2pGame.playerTurn == 'O' and self.player == 2):\n self.new_move(x, y, self.player)\n\n if self.has_won(self.player):\n self.gamestate = self.STATE_GAME_OVER\n if 
self.player == 1:\n self.gameover_screen('X Gagne')\n data = \"--W:X\"\n else:\n self.gameover_screen('O Gagne')\n data = \"--W:O\"\n self.p2pGame.playerTurn = 'X'\n self.p2pGame.sendTicTacToeData(text=data)\n\n\n elif self.is_a_draw():\n self.gamestate = self.STATE_GAME_OVER\n self.gameover_screen('Egalité')\n data = \"--D\"\n self.p2pGame.playerTurn = 'X'\n self.p2pGame.sendTicTacToeData(text=data)\n\n else:\n data = \"--O:\"+ str(x) + \":\" + str(y)\n self.p2pGame.playerTurn = 'X'\n self.p2pGame.sendTicTacToeData(text=data)\n # self.gamestate = self.STATE_O_TURN\n #self.launch()\n elif self.gamestate == self.STATE_GAME_OVER:\n #reset\n self.new_board()\n self.gamestate = self.FIRST_PLAYER\n self.p2pGame.sendPlayAgain(\"--A\")", "def select_move(self, index: int):\n # if the player reclicks on the piece, they are putting it down\n if (index == self.selected_move):\n self.selected_move = -1\n self.turn_value_text = \"You (Black)\"\n self.selected_piece_value_text = f\"N/A\"\n self._sync_gui()\n return\n # if the player has not selected a piece, and the piece they clicked on is a black team piece. Pick up the piece\n if (self.selected_move == -1) and (\n self.game_board.board[index].team_id == BLACK_TEAM_ID\n ):\n self.selected_move = index\n\n self.turn_value_text = \"You (Black)\"\n self.selected_piece_value_text = (\n f\"{index} : {self.game_board.board[index].name}\"\n )\n\n self._sync_gui()\n return\n # if they selected a piece that is not on their team to pick up\n if self.selected_move == -1:\n return\n # if the player is holding one of their pieces and they select on a valid move\n if self.game_board.validate_move(start=self.selected_move, end=index):\n self.game_board.move_pieces(start=self.selected_move, end=index)\n self.selected_move = -1\n\n self.turn_value_text = \"AI (White)\"\n self.selected_piece_value_text = f\"N/A\"\n self._sync_gui()\n time.sleep(0.5)\n self._ai_move()", "def clickPiece(self, event):\n r = event.widget.grid_info()['row']\n c = event.widget.grid_info()['column']\n er, ec = self.emptyPiece\n \n #print('clicked: %i, %i' %(r, c))\n \n if r==er: # pieces are on same row as empty\n if c < ec: # columns slide right\n numToSlide = ec-c\n for i in range(0,numToSlide):\n piece = self.findPieceByRC(r, ec-1-i)\n piece.grid(row=r,column=ec-i)\n \n elif c > ec: # slide left\n numToSlide = c-ec\n for i in range(0,numToSlide):\n piece = self.findPieceByRC(r, ec+1+i)\n piece.grid(row=r,column=ec+i)\n self.emptyPiece = (r, c)\n self.moveCount += 1\n #self.moveVar.set(self.moveCount)\n elif c==ec: # pieces on same column\n if r < er: # slide down\n numToSlide = er-r\n for i in range(0,numToSlide):\n piece = self.findPieceByRC(er-1-i, c)\n piece.grid(row=er-i,column=c)\n if r > er:\n numToSlide = r-er\n for i in range(0,numToSlide):\n piece = self.findPieceByRC(er+1+i, c)\n piece.grid(row=er+i,column=c)\n self.emptyPiece = (r, c)\n self.moveCount += 1\n #self.moveVar.set(self.moveCount)", "def move(self, i, j):\n board_cpy = copy.deepcopy(self.board)\n piece = self.board[self.selected[0]][self.selected[1]]\n if piece == 0:\n return False\n elif piece.player != self.active_player:\n print(\"it's not your turn\", piece.player, self.active_player)\n return False\n elif self.board[i][j] != 0:\n if piece.player == self.board[i][j].player:\n print(\"can't capture your own pieces\")\n return False\n if i==self.selected[0] and piece.direction == 0:\n if piece.master == False:\n if (i + j - sum(self.selected))%2 == 0:\n print(\"must choose a different color\")\n return False\n if j 
< self.selected[1]:\n spaces_between = [self.board[i][y] for y in range(j+1, self.selected[1])]\n else:\n spaces_between = [self.board[i][y] for y in range(self.selected[1]+1, j)]\n print(spaces_between)\n for p in spaces_between:\n if p != 0:\n print(\"can't jump pieces\")\n return False\n piece.direction = 1\n self.board[i][j] = piece\n self.board[self.selected[0]][self.selected[1]] = 0\n elif j==self.selected[1] and piece.direction == 1:\n if piece.master == False:\n if (i + j - sum(self.selected))%2 == 0:\n print(\"must choose a different color\")\n return False\n if i < self.selected[0]:\n spaces_between = [self.board[x][j] for x in range(i+1, self.selected[0])]\n else:\n spaces_between = [self.board[x][j] for x in range(self.selected[0]+1, i)] \n print(spaces_between)\n for p in spaces_between:\n if p != 0:\n print(\"can't jump pieces\")\n return False\n piece.direction = 0\n self.board[i][j] = piece\n self.board[self.selected[0]][self.selected[1]] = 0\n else:\n print(\"Invalid movement\")\n return False\n if self.board[i][j] == 0:\n print(\"Something wrong happened\")\n return False\n elif (i==0 or i==self.board_size-1) and (j==0 or j==self.board_size-1):\n self.board[i][j].master = True\n self.active_player = (self.active_player+1)%2\n self.sequence += [board_cpy]\n return True", "def on_click(button):\n global ttt, choices, count, sym, result, x_pos, o_pos\n\n if count % 2 == 0:\n sym = \"X\"\n else:\n sym = \"O\"\n count += 1\n\n button.config(\n text=sym,\n state='disabled',\n disabledforeground=\"red\") # For cross\n\n x, y = get_coordinates(button)\n x += 1\n y += 1\n x_pos.append((x, y))\n state = gen_state(to_move='O', x_positions=x_pos,\n o_positions=o_pos)\n try:\n choice = choices.get()\n if \"Random\" in choice:\n a, b = random_player(ttt, state)\n elif \"Pro\" in choice:\n a, b = minimax_decision(state, ttt)\n else:\n a, b = alphabeta_player(ttt, state)\n except (ValueError, IndexError, TypeError) as e:\n disable_game()\n result.set(\"It's a draw :|\")\n return\n if 1 <= a <= 3 and 1 <= b <= 3:\n o_pos.append((a, b))\n button_to_change = get_button(a - 1, b - 1)\n if count % 2 == 0: # Used again, will become handy when user is given the choice of turn.\n sym = \"X\"\n else:\n sym = \"O\"\n count += 1\n\n if check_victory(button):\n result.set(\"You win :)\")\n disable_game()\n else:\n button_to_change.config(text=sym, state='disabled',\n disabledforeground=\"black\")\n if check_victory(button_to_change):\n result.set(\"You lose :(\")\n disable_game()", "def click_callback(self, event):\n # print(\"clicked at \", event.x+self.offset_x, event.y+self.offset_y)\n # x = string.ascii_lowercase[math.ceil((event.x + self.offset_x) / self.width) - 1]\n # y = (math.ceil((event.y + self.offset_y) / self.width) - 9) * -1\n self.clear_moves_on_canvas()\n\n x = math.ceil((event.x + self.offset_x) / self.width) - 1\n y = math.ceil((event.y + self.offset_y) / self.width) - 1\n\n if 0 <= x < 8 and 0 <= y < 8:\n board_value = self.game.board[x][y]\n if self.moving:\n # check if second click isn't on another piece\n if board_value != \"\" and board_value[0] == self.game.current_player_color:\n self.calculate_moves_for_moving_piece(x, y)\n else:\n self.move_piece(x, y) # method moves moving_piece\n self.moving = False\n else:\n self.calculate_moves_for_moving_piece(x, y) # method sets moving_piece", "def move_selected_piece(self):\n return self.selected_piece is not None and list(self.mouse_pos) in self.board.get_move(self.selected_piece)", "def update_board(self, move):\n #new_move equals 
the gird with selection(Which is the players input)\n new_move = self.grid[move]\n\n # check if column selected by player is full if the first index (top) has a game piece\n if new_move[0] != \" \" :\n return True\n\n # this will get the correct column and add the player's move\n # subtract player column selection by 1 to select correct column\n adjustment = -1\n while new_move[adjustment] != \" \":\n adjustment -= 1\n\n # update the grid with the selected column by the player\n new_move[adjustment] = self.playing_player[1]\n return False", "def choosePosition(player_n, board):\r\n drawBoard(board)\r\n slot_1 = int(raw_input(\"Slot 1 Position Number: > \"))\r\n while slot_1 < 0 or slot_1 >8:\r\n int(raw_input(\"enter valid Position between 0 and 8 : \"))\r\n \r\n board[slot_1] = \"X\"\r\n print \"Player %r: select your second slot number for your ship:\" % player_n\r\n drawBoard(board)\r\n slot_2 = int(raw_input(\"Slot 2 Position Number: > \"))\r\n while slot_1 < 0 or slot_1 >8:\r\n int(raw_input(\"enter valid Position between 0 and 8 : \"))\r\n \r\n while (slot_1 + 3 != slot_2 and slot_1 - 3 != slot_2 and slot_1 + 1 != slot_2 and slot_1 - 1 != slot_2):\r\n print \"Invalid choice! Try again!\"\r\n slot_2 = int(raw_input(\"Slot 2 Position Number: > \"))\r\n board[slot_2] = \"X\"\r\n for index, each in enumerate(board):\r\n if(each != \"X\"):\r\n board[index] = \" \"\r\n else:\r\n board[index] = \"S\"", "def select_our_piece(self):\n return self.board.location(self.mouse_pos) is not None and self.board.location(self.mouse_pos).color == self.turn", "def mouse_click(self,event):\n global drag_sq\n# print \"click at {0} {1}\".format(event.x,event.y)\n# sq = (event.y // sq_size) * 8 + event.x // sq_size\n sq = self.coord_to_sq((event.x, event.y))\n if sq in self.piece_objs:\n drag_sq = sq\n self.canvas.tag_raise(self.piece_objs[sq])\n return", "def player_stage(niv): \n playing = True\n a = niv[0][0] \n b = niv[0][1] \n (x, y) = (a, b) \n state = [[a, b]] #Create a list with the starting point of the selected level patern.\n sense.stick.get_events()\n while playing:\n for event in sense.stick.get_events(): #It moves the pixel with the player moves and add the point passed by the player in the state[].\n if event.action == 'pressed':\n if event.direction == 'left':\n if x > 0:\n x = min(x-1, 7)\n state.append([x, y])\n elif event.direction == 'right':\n if x < 7:\n x = max(x+1, 0)\n state.append([x, y])\n if event.direction == 'down':\n if y < 7:\n y = min(y+1, 7)\n state.append([x, y])\n elif event.direction == 'up':\n if y > 0:\n y = max(y-1, 0)\n state.append([x, y])\n elif event.direction == 'middle':\n playing = False\n sense.set_pixel(x, y, RED)\n if state[:] == niv[:]: #Compare the way choosen by the player with the selected level patern. Results of the try.\n sense.show_message(\"WINNER !\",\n text_colour=LEMON, scroll_speed=0.05)\n sleep(2)\n main() #brings back to the level selection.\n else:\n sense.show_message(\"LOSER !\",\n text_colour=BLUE, scroll_speed=0.05)\n sleep(2)\n try_again(niv) #cf. 
try_again() function", "def calculate_moves_for_moving_piece(self, x, y):\n # print(\"clicked at \", x, y)\n # print(self.game.board[x][y])\n board_value = self.game.board[x][y]\n player_color = self.game.current_player_color\n if board_value != \"\" and board_value[0] == self.game.current_player_color:\n\n # TODO tu sa musia vediet pohnut okrem krala aj figurky ktore mozu zabranit sachu\n if self.game.is_game_checked and board_value != self.game.current_player_color + \"K\":\n return\n\n self.moving = True\n self.moving_piece_key = board_value\n\n if self.game.current_player_color == \"w\":\n self.moving_piece = self.game.white_pieces[board_value[1:]]\n elif self.game.current_player_color == \"b\":\n self.moving_piece = self.game.black_pieces[board_value[1:]]\n\n piece = self.moving_piece\n\n if piece.color == player_color:\n self.moves = self.moves_manager.get_moves(piece, self.game.board)\n self.show_moves_on_canvas(self.moves)", "def main():\n p.init() # Initializing pygame object\n screen = p.display.set_mode((WIDTH, HEIGHT))\n clock = p.time.Clock()\n screen.fill(p.Color(\"white\"))\n gs = ChessEngine.GameState()\n\n valid_moves = gs.get_valid_moves()\n\n # Flag to control the number of times get valid moves is called\n # Only if the user makes a valid move, it is called\n move_made = False\n\n load_images()\n game_running = True\n\n sq_selected = tuple() # (row, col), keeps track of user click\n player_clicks = list() # 2 tuples in the list, [(row, col), (row, col)]\n\n while game_running:\n\n for e in p.event.get():\n if e.type == p.QUIT:\n game_running = False\n\n elif e.type == p.KEYDOWN:\n if e.key == p.K_z: # undo when 'z' is pressed\n gs.undo_move()\n move_made = True # On undo we need to generate all valid moves again\n\n elif e.type == p.MOUSEBUTTONDOWN:\n location = p.mouse.get_pos() # Gets (col, row) location of mouse click\n row = location[1] // SQ_SIZE\n col = location[0] // SQ_SIZE\n\n # If user clicks on the same square again, i.e. 
as source and destination,\n # then we deselect it and reset player clicks\n if sq_selected == (row, col):\n sq_selected = tuple()\n player_clicks = list()\n else:\n if not (len(player_clicks) == 0 and gs.board[row][col] == gs.EMPTY_SQ):\n sq_selected = (row, col)\n player_clicks.append(sq_selected) # Append both first and second clicks\n\n # After second click only\n if len(player_clicks) == 2:\n move = ChessEngine.Move(start_sq=player_clicks[0], end_sq=player_clicks[1], board=gs.board)\n # move.print_move()\n for i in range(len(valid_moves)):\n\n if move == valid_moves[i]:\n gs.make_move(valid_moves[i])\n move_made = True\n\n player_clicks = list() # Resetting to restart the 2 click move logic\n sq_selected = tuple()\n if not move_made:\n player_clicks = [sq_selected]\n\n if move_made:\n valid_moves = gs.get_valid_moves()\n move_made = False\n\n draw_game_state(screen, gs)\n clock.tick(MAX_FPS)\n p.display.flip()", "def choose_move(self):\r\n \r\n return None", "def click(self, position):\n w, h = self.window.size\n sx, sy = self.tictactoe.size\n rx, ry = position\n x, y = sx * rx // w, sy * ry // h\n if self.tictactoe.available((x, y)):\n self.choice = (x, y)", "def castling_implement(self, turn, select, move):\n \n if select == self.coords.index(piece_class.KING_LOCATION[turn]):\n if self.board[self.coords.index(piece_class.KING_LOCATION[turn])].move_track == False:\n if move in [2, 58]:\n self.board[move+1] = self.board[move-2]\n self.board[move-2] = self.empty\n if move in [6, 62]:\n self.board[move-1] = self.board[move+1]\n self.board[move+1] = self.empty", "def check_legal(self, cur_pos, new_pos, board, state):\n new_row = self.ind(new_pos)[0]\n new_col = self.ind(new_pos)[1]\n cur_row = self.ind(cur_pos)[0]\n cur_col = self.ind(cur_pos)[1]\n piece = self.piece_type(cur_pos, board)\n\n if state == \"UNFINISHED\":\n if (new_row == cur_row + 3) and (new_col == cur_col + 2): #F5\n if board[cur_row + 1][cur_col] and board[cur_row + 2][cur_col + 1] is not None:\n print(\"hello 1 elephant\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"1for some reason it thinks the new pos has a color of the same piece\")\n return\n print(\"elephant moved down and right\")\n return True\n\n elif (new_row == cur_row - 3) and (new_col == cur_col - 2): #B1\n print(\"Hello im here\")\n # checking left and right are valid\n if board[cur_row - 1][cur_col] and board[cur_row - 2][cur_col - 1] is not None:\n print(\"horse attempted to move left and up the board\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n return\n print(\"e moved up and left\")\n return True\n\n elif (new_row == cur_row + 3) and (new_col == cur_col - 2): #\n # checking left and right are valid\n if board[cur_row + 1][cur_col] and board[cur_row + 2][cur_col - 1] is not None:\n print(\"hello e3\")\n return False\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n return False\n print(\"e moved down and right\")\n return True\n\n elif (new_row == cur_row - 3) and (new_col == cur_col + 2): #F1\n # checking left and right are valid\n if board[cur_row - 1][cur_col] and board[cur_row - 2][cur_col + 1] is not None:\n print(\"hello e4\")\n return False\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n return False\n print(\"Horse moved down and left 
2\")\n return True\n #---------------------------------------------------------------------------------------------------------------\n # Check if the forwards and backwards is legal\n elif (new_row == cur_row - 2) and (new_col == cur_col + 3): #G2\n # checking left and right are valid\n if board[cur_row][cur_col + 1] and board[cur_row - 1][cur_col + 2] is not None:\n print(\"hello e5\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"bye 5e\")\n return\n print(\"it worked e5\")\n return True\n\n elif (new_row == cur_row - 2) and (new_col == cur_col - 3): #A2\n # checking left and right are valid\n if board[cur_row][cur_col - 1] and board[cur_row - 1][cur_col - 2] is not None:\n print(\"hello e6\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"bye 6e\")\n return\n print(\"it worked e6\")\n return True\n\n elif (new_row == cur_row + 2) and (new_col == cur_col + 3): #G6\n # checking left and right are valid\n if board[cur_row][cur_col + 1] and board[cur_row - 1][cur_col - 2] is not None:\n print(\"hello 7e\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"ebye 7\")\n return\n print(\"it worked e7\")\n return True\n\n elif (new_row == cur_row + 2) and (new_col == cur_col - 3): #A6\n # checking left and right are valid\n if board[cur_row][cur_col - 1] and board[cur_row + 1][cur_col - 2] is not None:\n print(\"hello 8\")\n return\n elif self.piece_type(new_pos, board) is not None and self.piece_type(new_pos, board).get_color() == self._color:\n print(\"bye 8\")\n return\n print(\"it worked 8\")\n return True\n# else:\n # print(\"it actually never entered the if statement?\"\n #return False\n else:\n print(\"False\")\n return False", "def capture(board,player,possible_moves, pos, loc, i):\r\n \r\n if player == 1 and encode(loc+i) in board.black:\r\n next_piece = encode(loc+i) \r\n new_pos = int(loc + (i*2)) \r\n if not((pos[0] == 'B') and ((next_piece)[0] == 'A')) and not((pos[0] == 'G') and ((next_piece)[0] == 'H')):\r\n new_enc = encode(new_pos)\r\n if new_pos in range(0,112) and (new_enc not in board.white) and (new_enc not in board.black) and (new_enc not in board.empty):\r\n possible_moves.append([pos,new_enc,next_piece])\r\n return True\r\n \r\n if player == -1 and encode(loc+i) in board.white: \r\n next_piece = encode(loc+i) \r\n new_pos = int(loc + (i*2))\r\n if not((pos[0] == 'B') and ((next_piece)[0] == 'A')) and not((pos[0] == 'G') and ((next_piece)[0] == 'H')):\r\n new_enc = encode(new_pos)\r\n if new_pos in range(0,112) and (new_enc not in board.white) and (new_enc not in board.black) and (new_enc not in board.empty):\r\n possible_moves.append([pos,new_enc,next_piece])\r\n return True\r\n \r\n return False", "def mouseClick(self, event):\n if self.editMode:\n self.applyEditing(event)\n self.clearEditCursor(event)\n return\n x = (event.y - self.margin) // self.cellSize\n y = (event.x - self.margin) // self.cellSize\n if self.checkFree(x, y) == self.colors['busy']:\n return # clicked busy position\n self.onBoard += 1\n self.refreshScore()\n self.history.append((\n self.setBusy(x, y),\n self.addPentomino(x, y)\n ))\n if self.onBoard == self.expectedBest:\n self.gameOver()", "def choose_point_command(a):\n global canvas, best_line, list_best_label_distance, label_text_result\n if choose_point[0] != a and choose_point[1] != a: # if 
a was not be choose\n if choose_point[0] == -1 and choose_point[1] == -1:\n choose_point[0] = a\n list_point[a].configure(bg=point_color_choose, fg=\"white\") # Change color of point\n elif choose_point[0] != -1 and choose_point[1] == -1:\n choose_point[1] = a\n list_point[a].configure(bg=point_color_choose, fg=\"white\")\n best_line = dijkstra(data, amount_point_var, choose_point[0], choose_point[1]) # Find best line\n if best_line is not None:\n draw_bestline(best_line[\"path\"], canvas, list_position) # Draw best line with difference color\n\n # Draw best distance with difference color\n list_best_label_distance = draw_best_distance(best_line[\"path\"], data, canvas, list_position, 0.1)\n # Draw result\n text = draw_result(canvas, best_line, data)\n label_text_result = Label(canvas, text=text, height=4, wraplength=150, bg='lawn green')\n label_text_result.pack(pady=100, padx=10, anchor=NW)\n\n else:\n messagebox.showwarning(\"Warning\", \"Not exist path from point{} to point{}\"\n .format(choose_point[0]+1, choose_point[1]+1))\n elif choose_point[0] != -1 and choose_point[1] != -1:\n list_point[choose_point[0]].configure(bg=point_color, fg=\"black\")\n list_point[choose_point[1]].configure(bg=point_color, fg=\"black\")\n choose_point[0] = a\n choose_point[1] = -1 # Uncheck\n list_point[a].configure(bg=point_color_choose, fg=\"white\")\n canvas.delete(\"best_line_tag\")\n for i in range(len(list_best_label_distance)):\n list_best_label_distance[i].destroy()\n list_best_label_distance = []\n label_text_result.destroy()\n elif choose_point[0] == a:\n if choose_point[1] == -1:\n choose_point[0] = -1 # Uncheck\n list_point[a].configure(bg=point_color, fg=\"black\")\n else:\n choose_point[a] = -1 # Uncheck\n list_point[a].configure(bg=point_color, fg=\"black\")\n canvas.delete(\"best_line_tag\") # delete best line to refresh\n for i in range(len(list_best_label_distance)):\n list_best_label_distance[i].destroy()\n list_best_label_distance = []\n label_text_result.destroy()\n elif choose_point[1] == a:\n list_point[a].configure(bg=point_color, fg=\"black\")\n choose_point[1] = -1\n canvas.delete(\"best_line_tag\") # delete best line to refresh\n for i in range(len(list_best_label_distance)):\n list_best_label_distance[i].destroy()\n list_best_label_distance = []\n label_text_result.destroy()", "def selectionner(self, event):\n try:\n # On trouve le numéro de ligne/colonne en divisant par le nombre de pixels par case.\n # event.widget représente ici un des 9 canvas !\n ligne = event.y // event.widget.taille_case\n colonne = event.x // event.widget.taille_case\n\n # On verifie si la case est gagnante\n if self.partie.uplateau[event.widget.plateau.cordonnees_parent].est_gagnant(\"X\")\\\n or self.partie.uplateau[event.widget.plateau.cordonnees_parent].est_gagnant(\"O\"):\n raise ErreurCase(\"Le plateau est déja gagnant\")\n\n # On verifie si la case clicé est vide\n if not self.partie.uplateau[event.widget.plateau.cordonnees_parent].cases[ligne, colonne].est_vide():\n raise ErreurCase(\"La case est déjà prise !\")\n\n # On verifie si la position est valide\n if not self.partie.uplateau[event.widget.plateau.cordonnees_parent].position_valide(ligne, colonne):\n raise ErreurCase(\"La position n'est pas valide !\")\n\n # On verifie si on clic dans la bonne prochaine case\n if not self.partie.uplateau[event.widget.plateau.cordonnees_parent].position_valide(ligne, colonne):\n raise ErreurCase(\"Ce tour doit être joué dans la case en rouge !\")\n\n self.afficher_message(\"Case sélectionnée à la position 
(({},{}),({},{})).\"\n .format(event.widget.plateau.cordonnees_parent[0],\n event.widget.plateau.cordonnees_parent[1],\n ligne, colonne))\n\n # On dessine le pion dans le canvas, au centre de la case.\n # On utilise l'attribut \"tags\" pour être en mesure de récupérer\n # les éléments dans le canvas afin de les effacer par exemple.\n coordonnee_y = ligne * event.widget.taille_case + event.widget.taille_case // 2\n coordonnee_x = colonne * event.widget.taille_case + event.widget.taille_case // 2\n event.widget.create_text(coordonnee_x, coordonnee_y, text=self.partie.joueur_courant.pion,\n font=('Helvetica', event.widget.taille_case // 2), tags='pion')\n\n # Mettre à jour la case sélectionnée\n self.partie.uplateau[event.widget.plateau.cordonnees_parent] \\\n .selectionner_case(ligne, colonne, self.partie.joueur_courant.pion)\n\n # Changer le joueur courant.\n # Vous pouvez modifier ou déplacer ce code dans une autre méthode selon votre propre solution.\n if self.partie.joueur_courant == self.partie.joueurs[0]:\n self.partie.joueur_courant = self.partie.joueurs[1]\n else:\n self.partie.joueur_courant = self.partie.joueurs[0]\n\n # Effacer le contenu du widget (canvas) et du plateau (dictionnaire) quand ce dernier devient plein.\n # Vous pouvez modifier ou déplacer ce code dans une autre méthode selon votre propre solution.\n if not event.widget.plateau.non_plein() and not event.widget.plateau.est_gagnant(\"X\") \\\n and not event.widget.plateau.est_gagnant(\"O\"):\n event.widget.delete('pion')\n event.widget.plateau.initialiser()\n # Afficher label de partie nule\n self.afficher_message(\"Partie nule dans la case({},{}).\"\n .format(event.widget.plateau.cordonnees_parent[0],\n event.widget.plateau.cordonnees_parent[1]))\n\n # Augmenter le nombre de parties nules de 1\n self.partie.nb_parties_nulles += 1\n\n except ErreurCase as e:\n self.afficher_message(str(e), color='red')", "def menuSelection(self):\n \n self.selection = int(input(\"\\nWhere do you want to go? Make a selection: \"))\n \n while self.selection not in self.menu.index:\n self.selection = int(input(\"\\nWhere do you want to go? Make a selection: \"))\n \n menuCheck = str(input(str(\"\\n\" + self.menu.ix[self.selection]['name']) + \" eh? I hope it's good, People say: \" + \\\n str(self.menu.ix[self.selection]['snippet_text']) + \"\\n\\nIs this where you want to go? (Yes (y) or No (n)) \"))\n while menuCheck.lower() not in ['yes', 'y', 'no', 'n']:\n menuCheck = str(input(str(\"\\n\" + self.menu.ix[self.selection]['name']) + \" eh? I hope it's good, People say: \" + \\\n str(self.menu.ix[self.selection]['snippet_text']) + \"\\n\\nIs this where you want to go? 
(Yes (y) or No (n)) \"))\n \n os.system('clear')", "def user_control(board, x_player, y_player, button_pressed, inventory):\n\n red = '\\033[31m'\n reset_color = '\\033[0m'\n item_colors = {\n '●': '\\033[33m', '⚛': '\\033[34m', '✿': '\\033[31m', '✡': '\\033[94m',\n '♦': '\\033[32m', 'ᴥ': '\\033[31m', '☀': '\\033[33m'}\n place_on_right_side = board[y_player][x_player + 1]\n place_on_left_side = board[y_player][x_player - 1]\n place_on_up_side = board[y_player - 1][x_player]\n place_on_down_side = board[y_player + 1][x_player]\n places_prohibited_to_stand_on = [\n 'X', red + '#' + reset_color, '☹', '☃', '♞', '☻', '☬', item_colors['☀'] + '☀' + reset_color, red\n + '☀' + reset_color]\n\n if button_pressed == 'd' and place_on_right_side not in places_prohibited_to_stand_on:\n x_player += 1\n elif button_pressed == 'a' and place_on_left_side not in places_prohibited_to_stand_on:\n x_player -= 1\n elif button_pressed == 'w' and place_on_up_side not in places_prohibited_to_stand_on:\n y_player -= 1\n elif button_pressed == 's' and place_on_down_side not in places_prohibited_to_stand_on:\n y_player += 1\n\n friends = ['☹', '☃', '♞', '☻', '☬']\n # conditions for level 4 (feeding friends)\n if button_pressed == 'd' and place_on_right_side in friends and inventory['●'] > 19:\n x_player += 1\n elif button_pressed == 'a' and place_on_left_side in friends and inventory['●'] > 19:\n x_player -= 1\n elif button_pressed == 'w' and place_on_up_side in friends and inventory['●'] > 19:\n y_player -= 1\n elif button_pressed == 's' and place_on_down_side in friends and inventory['●'] > 19:\n y_player += 1\n return x_player, y_player", "def test_which_to_press():\n # Format: which_to_press(history, displayed)\n # History Format: (pressed, displayed) tuple list.\n\n x = [(1, 4), (25, 100)]\n\n # If display shows 1, always return 4.\n assert(which_to_press(None, 1) == 4)\n\n # If display shows 2, return value pressed in previous round.\n assert(which_to_press(x, 2) == 4)\n\n # If display shows 3, return value displayed in previous round.\n assert(which_to_press(x, 3) == 25)\n\n # If display shows 4, press the button with the label that\n # matches the larger of the pressed and the displayed values from\n # round floor(completed_rounds / 2) + 1 (round 2 in this case)\n assert(which_to_press(x, 4) == 100)", "def getPossibleMoves(self): # called to get possible positions this piece can go\r\n \r\n moves = {}\r\n\r\n ids = []\r\n\r\n for piece in self.board.pieces.values():\r\n if piece.name == \"empty\":\r\n piece.glow = False\r\n piece.ready = False\r\n\r\n self.piece = self\r\n\r\n def check(direction=\"left\", heading=\"north\", x=None, y=None):\r\n piece = self.piece\r\n if direction == \"left\": x -= 50\r\n else: x += 50\r\n\r\n if heading == \"north\": y -= 50\r\n else: y += 50\r\n\r\n if (x, y) in self.board.pieces: # position is empty\r\n empty = self.board.getPiece((x, y))\r\n empty.glow = True\r\n old, new, obj = (direction, heading), (x, y), piece\r\n identity = self.getRandomID(ids) # get an ID for the move\r\n moves[identity] = old, new, obj\r\n\r\n if piece.isKing: # piece is a king, so go on\r\n check(direction, heading, x, y)\r\n else: # its not empty, so check if its comrade\r\n x1, y1 = x+25, y+25\r\n piece2 = self.board.getPiece((x1, y1))\r\n try:\r\n if piece.isComrade(piece2):# piece is comrade so return\r\n return\r\n else: # piece is not comrade, so check empty\r\n if direction == \"left\": x2 = x1-25-50\r\n else: x2 = x1-25+50\r\n\r\n if heading == \"north\": y2 = y1-25-50\r\n else: y2 = 
y1-25+50\r\n\r\n if (x2, y2) in self.board.pieces: # its empty, so notify player\r\n empty = self.board.getPiece((x2, y2))\r\n empty.glow = True\r\n empty.ready = True\r\n\r\n old, new, obj = (direction, heading), (x2, y2), piece2\r\n identity = self.getRandomID(ids)\r\n moves[identity] = old, new, obj\r\n\r\n check(direction, heading, piece2.x-25, piece2.y-25)\r\n check(direction, heading, x2, y2)\r\n \r\n # check empty or comrade again\r\n if direction == \"left\": x3 = x2-50\r\n else: x3 = x2+50\r\n\r\n if heading == \"north\": y3 = y2-50\r\n else: y3 = y2+50\r\n\r\n if (x3, y3) in self.board.pieces: # positon(address) is empty\r\n return\r\n else: # there is a piece, so check if comrade, stop, if not comrade continue\r\n x3+=25\r\n y3+= 25\r\n\r\n piece3 = self.board.getPiece((x3, y3))\r\n if piece3.isComrade(piece2): # comrades, so stop\r\n return\r\n else: # not comrades, so continue\r\n self.piece = piece3\r\n check(direction, heading, x, y)\r\n\r\n #self.piece = piece2\r\n \r\n #check(direction, heading, x2, y2) # keep searching\r\n else: # its not empty, so return\r\n return\r\n except:\r\n pass\r\n\r\n if self.piece.name == \"white\": direction = \"north\"\r\n else: direction = \"south\"\r\n \r\n check(\"left\", direction, self.piece.x-25, self.piece.y-25)\r\n check(\"right\", direction, self.piece.x-25, self.piece.y-25)\r\n \r\n if self.piece.isKing:\r\n if self.piece.name == \"white\": heading = \"south\"\r\n else: heading = \"north\"\r\n \r\n check(\"left\", heading, self.piece.x-25, self.piece.y-25)\r\n check(\"right\", heading, self.piece.x-25, self.piece.y-25)\r\n\r\n if self.piece.name == \"white\":\r\n eatMoves = self.board.game.thinkEatMoves(moves, \"person\")\r\n if eatMoves is not None:\r\n return eatMoves\r\n\r\n return moves", "def placement(self,event):\r\n x,y,ship=event.x,event.y,False\r\n [xmin,ymin,xmax,ymax] = self.can.coords(self.hitbox[self.select])\r\n k=2\r\n if self.select==1 or self.select==2 or self.select==0:k=1\r\n axe,a,b=1,0*k,-1*k\r\n if xmax-xmin == 46:axe,a,b=0*k,-1*k,0\r\n x,y=(x-20)//46,(y-96)//46\r\n if self.select!=-1:\r\n ship=self.game.j1.replace_ship(x+b,y+a,self.select,axe)\r\n if 0<=x<=11 and 0<=y<=11:\r\n self.game.j1.main_ship(x+b,y+a,self.select,axe)\r\n self.game.j1.affichage()", "def checkLegalMove(self, initialPosition, destinationPosition, colorIndex):\n checkColor = self.grid.REPRESENTATION[colorIndex]\n otherColor = self.grid.REPRESENTATION[1-colorIndex]\n emptyColor = self.grid.REPRESENTATION[2]\n if self.grid[initialPosition] != checkColor:\n print 'The piece you are trying to move is not yours! Please reselect your move.'\n return False\n if self.grid[destinationPosition] != emptyColor:\n print 'The destination position of your move is not empty! Please reselect your move.'\n return False\n if initialPosition == destinationPosition:\n print 'The initial and destination position of your move are the same. Please reselect your move.'\n return False\n\n if initialPosition[0] == destinationPosition[0]:\n x = initialPosition[0]\n if (destinationPosition[1] - initialPosition[1]) %2 != 0:\n print 'Invalid move! Please reselect your move.'\n return False\n if initialPosition[1] < destinationPosition[1]:\n for i in range(initialPosition[1]+1, destinationPosition[1], 2):\n if self.grid[(x, i)] != otherColor or self.grid[(x, i+1)] != emptyColor:\n print 'Invalid move! 
Please reselect your move.'\n return False\n return True\n else:\n for i in range(initialPosition[1]-1, destinationPosition[1], -2):\n if self.grid[(x, i)] != otherColor or self.grid[(x, i-1)] != emptyColor:\n print 'Invalid move! Please reselect your move.'\n return False\n return True\n elif initialPosition[1] == destinationPosition[1]:\n y = initialPosition[1]\n if (destinationPosition[0] - initialPosition[0])%2 != 0:\n print 'Invalid move! Please reselect your move.'\n return False\n if initialPosition[0] < destinationPosition[0]:\n for i in range(initialPosition[0]+1, destinationPosition[0], 2):\n if self.grid[(i, y)] != otherColor or self.grid[(i+1, y)] != emptyColor:\n print 'Invalid move! Please reselect your move.'\n return False\n return True\n else:\n for i in range(initialPosition[0]-1, destinationPosition[0], -2):\n if self.grid[(i, y)] != otherColor or self.grid[(i-1, y)] != emptyColor:\n print 'Invalid move! Please reselect your move.'\n return False\n return True\n # make turns\n print 'Making turns is invalid move! Please reselect your move.'\n return False", "def respond_to_move(self, move):\n\n # this will get the piece at the queried position,\n # will notify user if there is no piece there\n current_algebraic, new_algebraic = move\n row, column = self.algebraic_mapped_to_position[current_algebraic]\n if self.board[row][column] == empty_square:\n print(\"There is no piece at %s\" % (current_algebraic,))\n return\n piece, location = self.board[row][column]\n\n # this will get all possible moves from this position\n # and will make the move if the new position is a\n # valid move\n piece_name = self.piece_names[piece]\n moves = self.moves[piece_name]((row, column))\n \n new_row, new_column = self.algebraic_mapped_to_position[new_algebraic]\n print(\"old position %s, %s\" % (row, column))\n print(\"new algebraic %s\" % new_algebraic)\n print(\"new position %s, %s\" % (new_row, new_column))\n print(\"moves %s\" % moves)\n if (new_row, new_column) in moves:\n # this will change the game board to reflect the move\n self.board[row][column] = empty_square\n self.board[new_row][new_column] = piece+location" ]
[ "0.7565643", "0.66341156", "0.6557111", "0.6449328", "0.63517064", "0.6219487", "0.61956", "0.61922944", "0.6161466", "0.6128278", "0.61244667", "0.6082774", "0.6044222", "0.60180783", "0.5978442", "0.5978442", "0.59686", "0.5960028", "0.59480584", "0.5931471", "0.58620036", "0.5849294", "0.5845106", "0.5844595", "0.5817512", "0.5809941", "0.58027506", "0.5793837", "0.5790552", "0.57857674" ]
0.6903945
1
clicked on selection_bar | | | Yes No | | selected from where or not selected is it his piece | | | | | | | selection_bar board nothing selected yes no \ | / | | \ | / blit cover nothing to do \ | / (if clicked piece is his piece == True) else nothing to do and if its availability is their \ / | \ / | \ / | \ / | \/ | change selection select selection
def selection_board_maintenance(self,x_cor,y_cor): for event in pygame.event.get(): if event.type == pygame.QUIT: pygame.display.quit() pygame.quit() quit() if event.type == pygame.MOUSEBUTTONDOWN: #print("mouse is pressed") #everything begins here x_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor, y_cor, 'selection_bar') #print(who_is_clicked) if (self.selected_from_selection_bar + self.selected_from_board): #print("inside selected item one") if Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked): if self.pieces[piece].availability: self.selected_from_board = False self.selected_from_selection_bar = True #update self.selected_piece = piece self.selected_position =Helping_Class.selection_bar_reverse_mapping[piece] else: #nothing to do pass else: #print("nothing is selected") #check if clicked on his piece change then select it if Helping_Class._check_if_clicked_on_his_own_piece_(self.whose_move,who_is_clicked): if self.pieces[piece].availability: self.selected_from_selection_bar = True #update self.selected_piece = piece self.selected_position =(x_adjusted,y_adjusted) #print(self.selected_piece,self.selected_position,self.selected_from_selection_bar) else: #nothing to do pass else: #color change #who_is_clicked is dummy variable as no click has occurred x_adjusted,y_adjusted,who_is_clicked,piece = Helping_Class.convert_coordinate(x_cor, y_cor, 'selection_bar') self.blit_piece = [(x_adjusted,y_adjusted),piece]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def select_me(self, mouse_pos):\r\n\t\t#self.active = self.rect.collidepoint(mouse_pos)\r\n\t\tself.active = True", "def menuSelection(self):\n \n self.selection = int(input(\"\\nWhere do you want to go? Make a selection: \"))\n \n while self.selection not in self.menu.index:\n self.selection = int(input(\"\\nWhere do you want to go? Make a selection: \"))\n \n menuCheck = str(input(str(\"\\n\" + self.menu.ix[self.selection]['name']) + \" eh? I hope it's good, People say: \" + \\\n str(self.menu.ix[self.selection]['snippet_text']) + \"\\n\\nIs this where you want to go? (Yes (y) or No (n)) \"))\n while menuCheck.lower() not in ['yes', 'y', 'no', 'n']:\n menuCheck = str(input(str(\"\\n\" + self.menu.ix[self.selection]['name']) + \" eh? I hope it's good, People say: \" + \\\n str(self.menu.ix[self.selection]['snippet_text']) + \"\\n\\nIs this where you want to go? (Yes (y) or No (n)) \"))\n \n os.system('clear')", "def select_me(self, mouse_pos):\r\n\t\tself.active = self.rect.collidepoint(mouse_pos)", "def main_board_maintenance(self,x_cor,y_cor):\r\n\t\r\n\t\tfor event in pygame.event.get(): \r\n\r\n\t\t\tif event.type == pygame.QUIT:\r\n\t\t\t\tpygame.display.quit()\r\n\t\t\t\tpygame.quit()\r\n\t\t\t\tquit()\r\n\t\t\t\r\n\t\t\tif event.type == pygame.MOUSEBUTTONDOWN:\r\n\r\n\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t#print(x_adjusted/80,y_adjusted/80)\r\n\r\n\t\t\t\tif self.selected_from_selection_bar :\r\n\t\t\t\t\t#print('inside selection bar selection option')\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\ttemp_game_state = CP.game_data()\r\n\t\t\t\t\ttemp_game_state = copy.deepcopy(self.game_state)\r\n\t\t\t\t\tdata_convert = CP.Conversion_of_postion_name(self.selected_piece,Helping_Class.selection_bar_reverse_mapping[self.selected_piece] ,(x_adjusted,y_adjusted))\r\n\t\t\t\t\ttemp_game_state.update(data_convert.piece, int(data_convert.i_pos_ani()), int(data_convert.f_pos_ani()))\r\n\t\t\t\t\ttemp_game_state.active_color = not temp_game_state.active_color\r\n\t\t\t\t\tfen = temp_game_state.generate_fen()\r\n\t\t\t\t\tboard2 = chess.Board(fen=fen)\r\n\t\t\t\t\tprint(board2)\r\n\t\t\t\t\tprint(fen)\r\n\t\t\t\t\tprint('board2.is_check()',board2.is_check())\r\n\t\t\t\t\t\r\n\t\t\t\t\t#now we need to place the piece on board\r\n\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)] == None:\r\n\t\t\t\t\t\t#print(self.selected_position)\r\n\t\t\t\t\t\tif not board2.is_check():\r\n\t\t\t\t\t\t\tif self._check_valid_position_(x_adjusted,y_adjusted):\r\n\t\t\t\t\t\t\t\tself.place_piece_on_board_from_selection_bar(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t\t#rajan's\r\n\t\t\t\t\t\t\t\t#print(self.selected_piece)\r\n\t\t\t\t\t\t\t\t#print(self.selected_position)\r\n\t\t\t\t\t\t\t\tdata_convert = CP.Conversion_of_postion_name(self.selected_piece,self.selected_position ,(x_adjusted,y_adjusted))\r\n\t\t\t\t\t\t\t\tself.game_state.update(data_convert.piece, int(data_convert.i_pos_ani()), int(data_convert.f_pos_ani()))\r\n\t\t\t\t\t\t\t\tself.selected_piece = None\r\n\t\t\t\t\t\t\t\tself.selected_position = None\r\n\r\n\t\t\t\t\t\t\t\tself.computer_turn =True\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t#board position is filled then nothing to do\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#if his piece change selection\r\n\t\t\t\t\t\tself.selected_from_selection_bar =False\r\n\t\t\t\t\t\tself.selected_from_board = 
True\r\n\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\r\n\r\n\t\t\t\telif self.selected_from_board:\r\n\t\t\t\t\t#print('inside selection bar board option')\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\t\r\n\t\t\t\t\tomega = True\r\n\t\t\t\t\tif self.selected_position:\r\n\t\t\t\t\t\tif self.selected_position == (x_adjusted,y_adjusted):\r\n\t\t\t\t\t\t\tomega = False\r\n\t\t\t\t\t#print(self.selected_position,(x_adjusted,y_adjusted))\r\n\t\t\t\t\tif omega:\r\n\t\t\t\t\t\tmove = self._check_valid_move_(x_adjusted,y_adjusted)\r\n\t\t\t\t\t\tprint(move)\r\n\t\t\t\t\tif omega:\r\n\t\t\t\t\t\tif move:\r\n\t\t\t\t\t\t\tself.computer_turn = True\r\n\t\t\t\t\t\t\t#if move contains x then we have update state of captured piece\r\n\t\t\t\t\t\t\t#else just update selected piece\r\n\t\t\t\t\t\t\t#print(\"correct move\")\r\n\t\t\t\t\t\t\tself.capture_piece_update_board_or_place_piece(move,x_adjusted,y_adjusted)\r\n\r\n\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t#select the piece\r\n\t\t\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\r\n\t\t\t\telse:\r\n\t\t\t\t\t\r\n\t\t\t\t\tx_adjusted,y_adjusted = Helping_Class.convert_coordinate(x_cor,y_cor,from_where ='board')\r\n\t\t\t\t\tif self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]:\r\n\t\t\t\t\t\t#select the piece\r\n\t\t\t\t\t\tif self.whose_move == 'white':\r\n\t\t\t\t\t\t\tif 'W' in self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\t\telif self.whose_move == 'black':\r\n\t\t\t\t\t\t\tif 'B' in self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]:\r\n\t\t\t\t\t\t\t\tself.selected_piece = self.anirudh_2_pritish[self.game_state.board[Helping_Class.convert_my_coordinate_to_anirudh(x_adjusted,y_adjusted)]]\r\n\t\t\t\t\t\t\t\tself.selected_from_board = True\r\n\t\t\t\t\t\t\t\tself.selected_position = (x_adjusted,y_adjusted)\r\n\t\t\t\t\t\t\telse:\r\n\t\t\t\t\t\t\t\t#nothing to do\r\n\t\t\t\t\t\t\t\tpass\r\n\t\t\t\t\telse:\r\n\t\t\t\t\t\t#it is none means nothing is their so nothing to do\r\n\t\t\t\t\t\tpass\r\n\t\t\t\t\t\r\n\t\t\t\r\n\r\n\t\t\telse:\r\n\t\t\t\t#print(\"not_pressed\")\r\n\t\t\t\tpass", "def clicked_glycan(self, event):\n #tab = self.tab_control.tab(self.tab_control.select(), \"text\")\n tab = self.tab_control.index(self.tab_control.select())\n item = event.widget.find_closest(event.x, event.y)\n idx = int(event.widget.gettags(item)[0])\n \n if self.selected_canvas:\n self.selected_canvas.delete(self.selection)\n\n if tab == 0:\n 
self.selected_canvas = self.common_canvas[idx]\n self.selected_glycan = self.common_glycans.items()[idx] \n elif tab == 1:\n self.selected_canvas = self.user_canvas[idx]\n self.selected_glycan = self.user_glycans.items()[idx] \n self.selection = self.selected_canvas.create_rectangle(0, 0, 100, 100, outline='red', width=6)", "def isSelected(*args):", "def isSelected(*args):", "def select(self, coord: Coord) -> None:\n active_team = self.units_manager.active_team\n self.prev_sel = self.curr_sel\n self.curr_sel = coord\n\n if self.prev_sel is None:\n # Nothing has been previously selected\n sel_unit = self.get_unit(coord)\n if sel_unit is None or sel_unit.played:\n self.move_area = []\n self.attack_area = []\n self.update_highlight()\n else:\n # Show the currently selected unit's move and attack area\n self.update_move_area()\n self.move_attack_area()\n self.update_highlight()\n else:\n # Something has been previously selected\n if self.prev_unit is not None and self.curr_unit is not None:\n # Selected a unit two times\n if self.prev_sel == self.curr_sel and not self.prev_unit.played and active_team.is_mine(self.prev_unit):\n # Two times on the same playable unit. Show the action menu.\n self.action_menu()\n elif self.curr_sel in self.attack_area:\n # Two different units: prev_unit can attack curr_unit\n # This results in a combined action: move the unit next to the enemy and propose the user to attack\n target_unit = self.curr_unit\n nearest = self.arrow.path[-1] if self.arrow.path else self.prev_sel\n if self.nearby_enemies(self.prev_unit, nearest):\n animation = self.make_move_unit_animation(self.prev_unit, nearest, self.arrow.path)\n self.add_move_unit_animation(animation)\n self.move_unit(self.prev_unit, nearest)\n self.curr_sel = nearest # otherwise move_undo will move back the defending unit!\n self.still_attack_area()\n self.update_highlight()\n self.action_menu(attacking=self.curr_unit, defending=target_unit)\n else:\n self.reset_selection()\n else:\n # Two different units: prev_unit can't attack curr_unit\n # show the current unit's move and attack area\n self.update_move_area()\n self.move_attack_area()\n self.update_highlight()\n elif self.can_selection_move():\n # Move the previously selected unit to the currently selected coordinate.\n animation = self.make_move_unit_animation(self.prev_unit, self.curr_sel, self.arrow.path)\n self.add_move_unit_animation(animation)\n self.move_unit(self.prev_unit, self.curr_sel)\n self.still_attack_area()\n self.update_highlight()\n self.action_menu()\n else:\n # Previously something irrelevant was chosen\n self.reset_selection()\n self.curr_sel = coord\n\n if self.curr_unit is not None and not self.curr_unit.played:\n # Selected a unit: show its move and attack area\n self.update_move_area()\n self.move_attack_area()\n\n self.update_highlight()\n\n self.arrow.set_path([])", "def interact(self,mouseY):\n index = floor((mouseY+self.scroll-50)/150)-1\n if index >= -1 and index < len(self.itemList.items):\n self.selected = index\n #i*150+50-self.scroll", "def choose_option(friendly,enemy,opt1=\"Fight\",opt2=\"Bag\",opt3=\"Pokemon\",opt4 = \"Run\"):\n background_color = blit_background()[1]\n blit_friendly(friendly)\n blit_enemy(enemy)\n blit_health(friendly,enemy)\n pygame.display.update()\n pause(friendly,enemy,3) #to stop the click from 1st menu selecting option in second\n mouse_pos = 0,0\n while True:\n event_check(False, friendly,enemy)\n blit_background()\n opt_1 = pygame.draw.rect(screen,((background_color)),(60,540,300,70))\n 
blit_text(opt1,(70,545))\n opt_3 = pygame.draw.rect(screen,(background_color),(60,615,300,70))\n blit_text(opt2,(70,620))\n opt_2 = pygame.draw.rect(screen,(background_color),(360,540,300,70))\n blit_text(opt3,(370,545))\n opt_4 = pygame.draw.rect(screen,(background_color),(360,615,300,70))\n blit_text(opt4,(370,620))\n mouse_pos = get_click()\n blit_friendly(friendly)\n blit_enemy(enemy)\n blit_health(friendly,enemy)\n blit_text(\"What will you do?\",(800,580))\n pygame.display.update()\n if opt_1.collidepoint(mouse_pos):\n option = 1\n break\n elif opt_2.collidepoint(mouse_pos):\n option = 2\n break\n elif opt_3.collidepoint(mouse_pos):\n option = 3\n break\n elif opt_4.collidepoint(mouse_pos):\n option = 4\n break\n pygame.display.update()\n return option", "def apply_selection(self, rv, index, is_selected):\r\n self.selected = is_selected", "def draw_selected(self):\n if self.get_selected() is not None and not self.check_if_locked(self.get_selected()):\n self.color_cell(pos=self.get_selected(\n ), color=SELECTED_INVALID if self.get_selected() in self.invalid else SELECTED)", "def __update_selection(self):\n if self.selected_offset != self.old_selected_offset:\n if self.old_selected_offset > -1:\n old_offset = (self.old_selected_offset - self.top_offset) * 8\n\n self.display.text(\">\", 0, old_offset, 0)\n\n new_offset = (self.selected_offset - self.top_offset) * 8\n self.display.text(\">\", 0, new_offset, 1)\n self.display.show()\n self.old_selected_offset = self.selected_offset", "def on_selected(self):\n self.colour = self.selected_colour\n self.is_selected = True\n self.redraw()", "def use_triggered(self):\n\n self.select_items()\n if self.items_selected:\n for index, item in enumerate(self.items_selected):\n index_selected = self.indices_selected[index]\n frame_selected = index_selected + 1\n item.setText(\"Frame %i included\" % frame_selected)\n item.setBackground(self.background_included)\n item.setForeground(QtGui.QColor(0, 0, 0))\n self.index_included[index_selected] = True\n self.frame_selector.setPhoto(self.frame_index)", "def changeTool(event):\n global active_colour, active_note, size\n\n if ((event.x > red_x1) & (event.x < red_x2) & (event.y > red_y1) & (event.y < red_y2)):\n active_colour = \"red\"\n active_note = 'A'\n elif ((event.x > orange_x1) & (event.x < orange_x2) & (event.y > orange_y1) & (event.y < orange_y2)):\n active_colour = \"orange\" \n active_note = 'B' \n elif ((event.x > yellow_x1) & (event.x < yellow_x2) & (event.y > yellow_y1) & (event.y < yellow_y2)):\n active_colour = \"yellow\"\n active_note = 'C'\n elif ((event.x > green_x1) & (event.x < green_x2) & (event.y > green_y1) & (event.y < green_y2)):\n active_colour = \"green\"\n active_note = 'D'\n elif ((event.x > blue_x1) & (event.x < blue_x2) & (event.y > blue_y1) & (event.y < blue_y2)):\n active_colour = \"blue\"\n active_note = 'E'\n elif ((event.x > indigo_x1) & (event.x < indigo_x2) & (event.y > indigo_y1) & (event.y < indigo_y2)):\n active_colour = \"indigo\"\n active_note = 'F'\n elif ((event.x > violet_x1) & (event.x < violet_x2) & (event.y > violet_y1) & (event.y < violet_y2)):\n active_colour = \"violet\"\n active_note = 'G'\n elif ((event.x > thin_x1) & (event.x < thin_x2) & (event.y > thin_y1 - 15) & (event.y < thin_y2 + 15)):\n size = 2\n elif ((event.x > medium_x1) & (event.x < medium_x2) & (event.y > medium_y1 - 10) & (event.y < medium_y2 + 10)):\n size = 5\n elif ((event.x > thick_x1) & (event.x < thick_x2) & (event.y > thick_y1 - 5) & (event.y < thick_y2 + 5)):\n size = 10\n\n 
t.coords(current_option, 133, 40 - size, 168, 41 + size)\n t.itemconfig(current_option , fill = active_colour, outline = active_colour)\n current_note.config(text=active_note)", "def selectionner(self, event):\n try:\n # On trouve le numéro de ligne/colonne en divisant par le nombre de pixels par case.\n # event.widget représente ici un des 9 canvas !\n ligne = event.y // event.widget.taille_case\n colonne = event.x // event.widget.taille_case\n\n # On verifie si la case est gagnante\n if self.partie.uplateau[event.widget.plateau.cordonnees_parent].est_gagnant(\"X\")\\\n or self.partie.uplateau[event.widget.plateau.cordonnees_parent].est_gagnant(\"O\"):\n raise ErreurCase(\"Le plateau est déja gagnant\")\n\n # On verifie si la case clicé est vide\n if not self.partie.uplateau[event.widget.plateau.cordonnees_parent].cases[ligne, colonne].est_vide():\n raise ErreurCase(\"La case est déjà prise !\")\n\n # On verifie si la position est valide\n if not self.partie.uplateau[event.widget.plateau.cordonnees_parent].position_valide(ligne, colonne):\n raise ErreurCase(\"La position n'est pas valide !\")\n\n # On verifie si on clic dans la bonne prochaine case\n if not self.partie.uplateau[event.widget.plateau.cordonnees_parent].position_valide(ligne, colonne):\n raise ErreurCase(\"Ce tour doit être joué dans la case en rouge !\")\n\n self.afficher_message(\"Case sélectionnée à la position (({},{}),({},{})).\"\n .format(event.widget.plateau.cordonnees_parent[0],\n event.widget.plateau.cordonnees_parent[1],\n ligne, colonne))\n\n # On dessine le pion dans le canvas, au centre de la case.\n # On utilise l'attribut \"tags\" pour être en mesure de récupérer\n # les éléments dans le canvas afin de les effacer par exemple.\n coordonnee_y = ligne * event.widget.taille_case + event.widget.taille_case // 2\n coordonnee_x = colonne * event.widget.taille_case + event.widget.taille_case // 2\n event.widget.create_text(coordonnee_x, coordonnee_y, text=self.partie.joueur_courant.pion,\n font=('Helvetica', event.widget.taille_case // 2), tags='pion')\n\n # Mettre à jour la case sélectionnée\n self.partie.uplateau[event.widget.plateau.cordonnees_parent] \\\n .selectionner_case(ligne, colonne, self.partie.joueur_courant.pion)\n\n # Changer le joueur courant.\n # Vous pouvez modifier ou déplacer ce code dans une autre méthode selon votre propre solution.\n if self.partie.joueur_courant == self.partie.joueurs[0]:\n self.partie.joueur_courant = self.partie.joueurs[1]\n else:\n self.partie.joueur_courant = self.partie.joueurs[0]\n\n # Effacer le contenu du widget (canvas) et du plateau (dictionnaire) quand ce dernier devient plein.\n # Vous pouvez modifier ou déplacer ce code dans une autre méthode selon votre propre solution.\n if not event.widget.plateau.non_plein() and not event.widget.plateau.est_gagnant(\"X\") \\\n and not event.widget.plateau.est_gagnant(\"O\"):\n event.widget.delete('pion')\n event.widget.plateau.initialiser()\n # Afficher label de partie nule\n self.afficher_message(\"Partie nule dans la case({},{}).\"\n .format(event.widget.plateau.cordonnees_parent[0],\n event.widget.plateau.cordonnees_parent[1]))\n\n # Augmenter le nombre de parties nules de 1\n self.partie.nb_parties_nulles += 1\n\n except ErreurCase as e:\n self.afficher_message(str(e), color='red')", "def slot_selectPoint(self, selectionDict):\n\t\tprint('bStackWidget.slot_selectPoint() selectionDict:', selectionDict)\n\t\tif selectionDict is None:\n\t\t\treturn\n\t\tif selectionDict['name'] == 'toggle rect roi':\n\t\t\treturn\n\t\ttype = 
selectionDict['type']\n\t\tidx = selectionDict['idx']\n\t\tif type == 'Nodes':\n\t\t\tnodeIdx = idx\n\t\t\tself.myStackView2.selectNode(nodeIdx, snapz=True, isShift=False, doEmit=True)\n\t\telif type == 'Edges':\n\t\t\tedgeIdx = idx\n\t\t\tself.myStackView2.selectEdge(edgeIdx, snapz=True, isShift=False, doEmit=True)", "def toggle_select(self):\r\n if not len(self.items):\r\n return\r\n item = self.items[self.item_sel]\r\n if item in self.selected:\r\n self.selected.remove(item)\r\n else:\r\n self.selected.append(item)\r\n self.do_paint()", "def poll_selection(self):\r\n osName = platform.system()\r\n\r\n ## Check if the user changed the KSDK_path\r\n try:\r\n checkPath = self.widgetList[1].get()\r\n if checkPath != self.localSDK.path:\r\n self.ask_set_directory(True, 1)\r\n\r\n ## Check if user updated project name\r\n checkName = self.widgetList[4].get()\r\n if checkName != self.newProj.name:\r\n if kT.check_proj_name(checkName):\r\n self.newProj.name = checkName\r\n else:\r\n self.newProj.name = None\r\n if self.prevName != checkName:\r\n tkMessageBox.showinfo(\"Invalid Project Name\",\\\r\n \"No spaces or special characters.\")\r\n self.prevName = checkName\r\n kT.debug_log(\"Invalid name\")\r\n except AttributeError:\r\n kT.debug_log(\"Basic Changed menu\", sys.exc_info()[2])\r\n #return\r\n\r\n try:\r\n now = self.widgetList[6].curselection()\r\n if now != self.curr:\r\n if len(self.widgetList[6].curselection()) > 0:\r\n try:\r\n self.displayBoard = PhotoImage(data=self.imageList[int(now[0])])\r\n except IndexError:\r\n kT.debug_log(now[0], sys.exc_info()[2])\r\n self.widgetList[8].grid_remove()\r\n self.widgetList[8] = Button(self, \\\r\n image=self.displayBoard, \\\r\n command=lambda:\\\r\n self.web_launch(self.localSDK.brdList[\\\r\n int(self.widgetList[6].curselection()[0])]))\r\n self.widgetList[8].image = self.displayBoard\r\n self.widgetList[8].grid(row=5, column=3, columnspan=3, sticky=E+W+N+S)\r\n self.widgetList[8].bind(\"<Enter>\", \\\r\n lambda h: self.update_tips('Is this your board?\\n' + \\\r\n 'If so, ' + \\\r\n 'then clicking on the board' + \\\r\n ' image will take you to the ' + \\\r\n 'board homepage on ' + \\\r\n 'freescale.com.\\n\\n'))\r\n self.widgetList[8].bind(\"<Leave>\", \\\r\n lambda h: self.update_tips(self.defaultHelp))\r\n self.curr = now\r\n try:\r\n self.currBoard = int(self.widgetList[6].curselection()[0]) + 1\r\n # Clear out driver list and board\r\n self.newProj.board = ()\r\n self.newProj.drvList = []\r\n # Configure ksdkProj given GUI state\r\n self.localSDK.get_version()\r\n self.newProj.name = self.widgetList[4].get()\r\n self.newProj.setKsdkPath(self.localSDK.path)\r\n self.newProj.sdkVer = self.localSDK.version\r\n self.newProj.useBSP = not self.localSDK.isNewVersion()\r\n except IndexError:\r\n self.displayBoard = PhotoImage(data=kImg.boardImages['kds_icon.gif'])\r\n self.widgetList[8].config(image=self.displayBoard)\r\n self.widgetList[8].image = self.displayBoard\r\n self.widgetList[8].config(command=lambda: self.web_launch(kImg.boardImages['NoPreview.gif']))\r\n kT.debug_log(\"Index Error\", sys.exc_info()[2])\r\n #return\r\n except IndexError:\r\n kT.debug_log(\"Index Error\", sys.exc_info()[2])\r\n #return\r\n except AttributeError:\r\n kT.debug_log(\"AttributeError\", sys.exc_info()[2])\r\n return\r\n\r\n self._retLoop = self.after(250, self.poll_selection)", "def select(self):\n save= self.currentSub._select()\n if save!=False and save.selectable == True:\n self.currentSub =save\n if save.explorable():\n try :\n 
save.list[save.count].onShowed()\n except:\n pass", "def apply_selection(self, rv, index, is_selected):\n self.selected = is_selected", "def on_click(self, event):\n item = self.identify(\"item\", event.x, event.y)\n if item:\n if item in self.selection():\n self.selection_remove(item)\n else:\n self.selection_add(item)\n return \"break\"", "def select_move(self, index: int):\n # if the player reclicks on the piece, they are putting it down\n if (index == self.selected_move):\n self.selected_move = -1\n self.turn_value_text = \"You (Black)\"\n self.selected_piece_value_text = f\"N/A\"\n self._sync_gui()\n return\n # if the player has not selected a piece, and the piece they clicked on is a black team piece. Pick up the piece\n if (self.selected_move == -1) and (\n self.game_board.board[index].team_id == BLACK_TEAM_ID\n ):\n self.selected_move = index\n\n self.turn_value_text = \"You (Black)\"\n self.selected_piece_value_text = (\n f\"{index} : {self.game_board.board[index].name}\"\n )\n\n self._sync_gui()\n return\n # if they selected a piece that is not on their team to pick up\n if self.selected_move == -1:\n return\n # if the player is holding one of their pieces and they select on a valid move\n if self.game_board.validate_move(start=self.selected_move, end=index):\n self.game_board.move_pieces(start=self.selected_move, end=index)\n self.selected_move = -1\n\n self.turn_value_text = \"AI (White)\"\n self.selected_piece_value_text = f\"N/A\"\n self._sync_gui()\n time.sleep(0.5)\n self._ai_move()", "def setSelected(*args):", "def setSelected(*args):", "def requires_selection(self) -> bool:\n return True", "def _selectionChangedSlot(self, _):\r\n\r\n self._updateButtonStates()", "def onSelected(self):\n pass", "def set_piece_selected(self, uid, val):\n piece = self.get_piece_by_uid(uid)\n if piece:\n piece.selected = val" ]
[ "0.65026367", "0.64400166", "0.63710815", "0.63659126", "0.62468314", "0.62162787", "0.62162787", "0.6215016", "0.61831117", "0.61701834", "0.6158669", "0.6151914", "0.6135659", "0.61340016", "0.6130133", "0.61174387", "0.6027609", "0.60263383", "0.60083646", "0.60067785", "0.59736454", "0.59733063", "0.596729", "0.5966755", "0.5966695", "0.5966695", "0.5940521", "0.59251684", "0.58962023", "0.5890203" ]
0.7832845
0
Returns the scheduling actions based on highest Qvalues. This requires the model weights to be already saved.
def get_best_schedule(self): # load the model weights self.models = [load_model(f'dqn_{task_id}.h5') for task_id in range(len(self.models))] actions = [] is_scheduled = [0] * len(self.models) while (not all(is_scheduled)): observation = OrderedDict([('is_scheduled', is_scheduled)]) best_action = self._get_best_action(observation) actions.append(best_action) is_scheduled[best_action['task_id']] = best_action['start_time'] return actions
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def bestAction(self):\n get_q = self.getQFunction()\n maxq = -5000\n best_actions = []\n for (state, action), q in get_q.items():\n if q > maxq:\n maxq = q\n best_actions = [action]\n elif q == maxq:\n best_actions.append(action)\n return self.tuple_to_dictionary(random.choice(best_actions))", "def computeActionFromValues(self, state):\n \"*** YOUR CODE HERE ***\"\n maxvalue = -100000000\n bestaction = None\n for action in self.mdp.getPossibleActions(state):\n valueforthisaction = self.getQValue(state, action) # is this right? \n if valueforthisaction > maxvalue:\n bestaction = action\n maxvalue = valueforthisaction\n return bestaction", "def computeActionFromQValues(self, state):\n actions = self.getLegalActions(state)\n if len(actions) == 0:\n return None\n qVals = [self.getQValue(state, a) for a in actions]\n bestActions = []\n bestVal = max(qVals)\n for i in range(len(actions)):\n if qVals[i] == bestVal:\n bestActions.append(actions[i])\n return random.choice(bestActions) #Break ties randomly", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n if not self.getLegalActions(state): return None\n\n best_action = None;\n best_value = float('-inf')\n for action in self.getLegalActions(state):\n if self.getQValue(state, action) > best_value:\n best_value = self.getQValue(state, action)\n best_action = action\n return best_action", "def getBestAction(self, state):\n best_action = 0\n max_Q = -9999\n\n for action in self.getLegalActions(state):\n Q = self.getQValue(state, action)\n if Q > max_Q:\n best_action = action\n max_Q = Q\n \n return best_action", "def computeActionFromValues(self, state):\n \"*** YOUR CODE HERE ***\"\n actions = self.mdp.getPossibleActions(state)\n # Initialize max_value as - infinity\n # Initialize best action as None, choose max_value action\n max_value = float(\"-inf\")\n computed_action = None\n\n for action in actions:\n # Find q value of specified action\n q_value = self.computeQValueFromValues(state, action)\n # Update action if it's the best so far\n if q_value > max_value:\n max_value = q_value\n computed_action = action\n return computed_action", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n actions = self.getLegalActions(state)\n if len(actions) == 0:\n return None\n values = [self.getQValue(state, action) for action in actions]\n LoT = zip(values, actions)\n (bestValue, bestAction) = max(LoT)\n return bestAction", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n legal_actions = self.getLegalActions(state)\n if len(legal_actions) == 0: return None\n values = [self.getQValue(state, action) for action in legal_actions]\n max_value = max(values)\n best_indices = [index for index in range(len(values)) if values[index] == max_value]\n return legal_actions[random.choice(best_indices)]", "def execute_best_actions(self):\n while True:\n print(\"In execute_best_actions\")\n s = self.get_state_num()\n qvals = self.Q[s]\n # Get action with largest qval\n best_action = np.argmax(qvals)\n # We don't actually update with rewards,\n # but use them to know when to perform next action\n # We want to travel 0.5 m in action's direction.\n self.apply_action(best_action)\n while self.reward == None:\n rospy.sleep(0.5)\n print(\"Reward =\", self.reward)\n self.reward = None", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n max_qvalue = None\n for action in self.legalActions:\n qvalue = self.getQValue(state, action)\n if max_qvalue is None or max_qvalue < qvalue:\n 
max_qvalue = qvalue\n\n if max_qvalue is None:\n return None\n\n actions = []\n for action in self.legalActions:\n qvalue = self.getQValue(state, action)\n if qvalue == max_qvalue:\n actions.append(action)\n\n if max_qvalue is not None and len(actions) == 0:\n return self.legalActions[0]\n if len(actions) > 1:\n return Const.DO_NOTHING\n return random.choice(actions)", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n max_qvalue = None\n for action in self.legalActions:\n qvalue = self.getQValue(state, action)\n if max_qvalue is None or max_qvalue < qvalue:\n max_qvalue = qvalue\n\n if max_qvalue is None:\n return None\n\n actions = []\n for action in self.legalActions:\n qvalue = self.getQValue(state, action)\n if qvalue == max_qvalue:\n actions.append(action)\n\n if max_qvalue is not None and len(actions) == 0:\n return self.legalActions[0]\n if len(actions) > 1:\n return Const.DO_NOTHING\n return random.choice(actions)", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n ############################################################################################################ Eric Changed state to self.index\n \n legalActions = state.getLegalActions(self.index)\n #print \"COMPUTEACTIONFROMQVALUES in QLEARNINGAGENT, LEGALACTIONS: \", legalActions\n if len(legalActions) == 0:\n return None\n maxValue = self.getQValue(state, legalActions[0])\n maxAction = legalActions[0]\n\n for a in legalActions:\n myQValue = self.getQValue(state, a)\n #print \"COMPUTEACTIONFROMQVALUES in QLEARNINGAGENT, MYQVALUE: \", myQValue, \" MAXVALUE: \", maxValue\n if myQValue > maxValue:\n maxValue = self.getQValue(state, a)\n maxAction = a\n if myQValue == maxValue:\n if util.flipCoin(0.5):\n maxValue = self.getQValue(state, a)\n maxAction = a\n #print \"COMPUTEACTIONFROMQVALUES in QLEARNINGAGENT, MAXACTION: \", maxAction\n return maxAction\n util.raiseNotDefined()", "def _best_action(self, state):\n actions_rewards = list(self.Q[state].items())\n return max(actions_rewards, key=lambda x: x[1])[0]", "def computeActionFromValues(self, state):\n \"*** YOUR CODE HERE ***\"\n actions = self.mdp.getPossibleActions(state)\n optimalAction = None\n maxValue = float('-inf')\n for a in actions:\n qValue = self.computeQValueFromValues(state, a)\n if qValue > maxValue:\n maxValue = qValue\n optimalAction = a\n return optimalAction", "def bestAction(self, state):\n action = self.q_network.chooseBestAction(state)\n V = max(self.q_network.qValues(state))\n return action, V", "def best_actions(self, board, action_values):\n best = max if board.turn() == 1 else min\n best_val = best(value for _,value in action_values)\n return [action for action,value in action_values if value == best_val]", "def maxQ(self,state):\r\n \r\n maxQ = float('-inf')\r\n maxA = 0\r\n \r\n for a in self.actions:\r\n q = self.Q(state,a)\r\n #print(q,a)\r\n if q > maxQ:\r\n maxQ = q\r\n maxA = a\r\n return(maxQ,maxA)", "def best_action(self, actions, state):\n\n maxQvalue = self.valueFromQvalues(state, actions)\n\n if GameEnds13(state):\n return None\n else:\n maxAction = [action for action in actions if self.getQvalue(state, action) == maxQvalue]\n best_action = random.choice(maxAction)\n return best_action", "def get_max_q(self, actions, q2_state):\n\n action_values = [ qtron.forward_pass(q2_state) for qtron in actions.values() ]\n\n maxQ = max(action_values)\n\n return maxQ", "def max_Q_by_state(self, state):\n max_q = []\n\n for action in self.actions:\n if (state, action) not in self.Q:\n return 
self.Q_default_value\n else:\n max_q.append(self.Q[(state, action)])\n\n return max(max_q)", "def select_action(self) -> int:\n # simulation loop\n for i in range(self.iterations):\n self.__simulate(self.root, self.iterations)\n\n # action choice\n max_q = 0\n best_action = 0\n for action in actions:\n new_node = self.root.children[action]\n value = new_node.Q\n if value > max_q:\n max_q = value\n best_action = action\n return best_action", "def best_action(self):\n\n number_of_visits_children = [node.visits for node in self.root.children.values()]\n index_best_action = np.argmax(number_of_visits_children)\n\n a = list(self.root.children.values())[index_best_action].action\n return a", "def best_Q_action(self, state):\n state_Q = {}\n\n for action in self.actions:\n if (state, action) not in self.Q:\n return False\n else:\n state_Q[(state, action)] = self.Q[(state, action)]\n\n return max(state_Q.iteritems(), key=operator.itemgetter(1))[0][1]", "def sample_actions(self, qvalues):\n batch_size, n_actions = qvalues.shape\n best_actions = qvalues.argmax(axis=-1)\n\n return qvalues.argmax(axis=-1)", "def maxQ(self,state):\r\n maxA = 0\r\n maxQ = float(\"-inf\")\r\n for aCurr in self.actions:\r\n qCurr = self.Q[(state,aCurr)]\r\n if qCurr > maxQ:\r\n maxA = aCurr\r\n maxQ = qCurr \r\n return(maxQ,maxA)", "def getAction(self, gameState: GameState):\n _max = float(\"-inf\")\n action = None\n for move in gameState.getLegalActions(0):\n util = minimax(self.evaluationFunction, 1, 0,\n gameState.generateSuccessor(0, move), self.depth)\n if util > _max or _max == float(\"-inf\"):\n _max = util\n action = move\n\n return action", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n legal_actions = self.getLegalActions(state)\n\n if len(legal_actions) == 0:\n return None\n\n max_value = self.computeValueFromQValues(state)\n\n actions = [action for action in legal_actions if self.values[(str(position), action)] == max_value]\n\n return random.choice(actions)", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n legal_actions = self.getLegalActions(state)\n\n if len(legal_actions) == 0:\n return None\n\n max_value = self.computeValueFromQValues(state)\n\n actions = [action for action in legal_actions if self.values[(str(position), action)] == max_value]\n\n return random.choice(actions)", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n legal_actions = self.getLegalActions(state)\n\n if len(legal_actions) == 0:\n return None\n\n max_value = self.computeValueFromQValues(state)\n\n actions = [action for action in legal_actions if self.values[(str(position), action)] == max_value]\n\n return random.choice(actions)", "def computeActionFromQValues(self, state):\n \"*** YOUR CODE HERE ***\"\n\n position = state.getPacmanPosition()\n\n legal_actions = self.getLegalActions(state)\n\n if len(legal_actions) == 0:\n return None\n\n max_value = self.computeValueFromQValues(state)\n\n actions = [action for action in legal_actions if self.values[(str(position), action)] == max_value]\n\n return random.choice(actions)" ]
[ "0.767564", "0.71033466", "0.69351465", "0.6854587", "0.68315357", "0.6827179", "0.68225276", "0.67966247", "0.67770445", "0.67721236", "0.67721236", "0.67337626", "0.67147684", "0.6710782", "0.66831255", "0.6642326", "0.66293216", "0.6593363", "0.65867025", "0.65508795", "0.6509054", "0.65076995", "0.64903075", "0.64663875", "0.6447948", "0.64473164", "0.64240474", "0.64240474", "0.64240474", "0.64240474" ]
0.7105697
1